diff --git a/go.mod b/go.mod
index a11f656eac4..bf1edcf0735 100644
--- a/go.mod
+++ b/go.mod
@@ -2,7 +2,7 @@ module github.com/containers/buildah
 
 // Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates
 
-go 1.24.6
+go 1.25.0
 
 require (
 	github.com/containerd/platforms v1.0.0-rc.2
@@ -29,7 +29,7 @@
 	github.com/opencontainers/selinux v1.13.1
 	github.com/openshift/imagebuilder v1.2.19
 	github.com/seccomp/libseccomp-golang v0.11.1
-	github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af
+	github.com/sirupsen/logrus v1.9.4-0.20251023124752-b61f268f75b6
 	github.com/spf13/cobra v1.10.2
 	github.com/spf13/pflag v1.0.10
 	github.com/stretchr/testify v1.11.1
@@ -41,7 +41,7 @@
 	golang.org/x/sync v0.19.0
 	golang.org/x/sys v0.39.0
 	golang.org/x/term v0.38.0
-	tags.cncf.io/container-device-interface v1.0.2-0.20251120202831-139ffec09210
+	tags.cncf.io/container-device-interface v1.1.0
 )
 
 require (
@@ -53,6 +53,9 @@ require (
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect
+	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
+	github.com/blang/semver v3.5.1+incompatible // indirect
+	github.com/cenkalti/backoff/v5 v5.0.3 // indirect
 	github.com/chzyer/readline v1.5.1 // indirect
 	github.com/clipperhouse/stringish v0.1.1 // indirect
 	github.com/clipperhouse/uax29/v2 v2.3.0 // indirect
@@ -61,11 +64,13 @@ require (
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
 	github.com/containerd/typeurl/v2 v2.2.3 // indirect
-	github.com/containernetworking/plugins v1.8.0 // indirect
+	github.com/containernetworking/plugins v1.9.0 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
 	github.com/coreos/go-systemd/v22 v22.6.0 // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
+	github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
 	github.com/disiqueira/gotree/v3 v3.0.2 // indirect
 	github.com/distribution/reference v0.6.0 // indirect
 	github.com/docker/docker-credential-helpers v0.9.4 // indirect
@@ -74,18 +79,44 @@ require (
 	github.com/go-jose/go-jose/v4 v4.1.3 // indirect
 	github.com/go-logr/logr v1.4.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-openapi/analysis v0.24.1 // indirect
+	github.com/go-openapi/errors v0.22.4 // indirect
+	github.com/go-openapi/jsonpointer v0.22.1 // indirect
+	github.com/go-openapi/jsonreference v0.21.3 // indirect
+	github.com/go-openapi/loads v0.23.2 // indirect
+	github.com/go-openapi/runtime v0.29.2 // indirect
+	github.com/go-openapi/spec v0.22.1 // indirect
+	github.com/go-openapi/strfmt v0.25.0 // indirect
+	github.com/go-openapi/swag v0.25.4 // indirect
+	github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
+	github.com/go-openapi/swag/conv v0.25.4 // indirect
+	github.com/go-openapi/swag/fileutils v0.25.4 // indirect
+	github.com/go-openapi/swag/jsonname v0.25.4 // indirect
+	github.com/go-openapi/swag/jsonutils v0.25.4 // indirect
+	github.com/go-openapi/swag/loading v0.25.4 // indirect
+	github.com/go-openapi/swag/mangling v0.25.4 // indirect
+	github.com/go-openapi/swag/netutils v0.25.4 // indirect
+	github.com/go-openapi/swag/stringutils v0.25.4 // indirect
+	github.com/go-openapi/swag/typeutils v0.25.4 // indirect
+	github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
+	github.com/go-openapi/validate v0.25.1 // indirect
+	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
 	github.com/godbus/dbus/v5 v5.2.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/google/go-containerregistry v0.20.6 // indirect
+	github.com/google/certificate-transparency-go v1.3.2 // indirect
+	github.com/google/go-containerregistry v0.20.7 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/in-toto/attestation v1.1.2 // indirect
+	github.com/in-toto/in-toto-golang v0.9.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jinzhu/copier v0.4.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.18.1 // indirect
+	github.com/klauspost/compress v1.18.2 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/manifoldco/promptui v0.9.0 // indirect
 	github.com/mattn/go-runewidth v0.0.19 // indirect
@@ -104,38 +135,55 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/proglottis/gpgme v0.1.6 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect
+	github.com/shibumi/go-pathspec v1.3.0 // indirect
 	github.com/sigstore/fulcio v1.8.1 // indirect
 	github.com/sigstore/protobuf-specs v0.5.0 // indirect
-	github.com/sigstore/sigstore v1.9.6-0.20251111174640-d8ab8afb1326 // indirect
+	github.com/sigstore/rekor v1.4.3 // indirect
+	github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect
+	github.com/sigstore/sigstore v1.10.0 // indirect
+	github.com/sigstore/sigstore-go v1.1.4 // indirect
+	github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect
 	github.com/smallstep/pkcs7 v0.1.1 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
 	github.com/sylabs/sif/v2 v2.22.0 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.3 // indirect
+	github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect
+	github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect
+	github.com/transparency-dev/merkle v0.0.2 // indirect
 	github.com/ulikunitz/xz v0.5.15 // indirect
 	github.com/vbatts/tar-split v0.12.2 // indirect
 	github.com/vbauerster/mpb/v8 v8.11.2 // indirect
 	github.com/vishvananda/netlink v1.3.1 // indirect
 	github.com/vishvananda/netns v0.0.5 // indirect
+	go.mongodb.org/mongo-driver v1.17.6 // indirect
 	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
 	go.opentelemetry.io/otel v1.38.0 // indirect
 	go.opentelemetry.io/otel/metric v1.38.0 // indirect
 	go.opentelemetry.io/otel/trace v1.38.0 // indirect
 	go.yaml.in/yaml/v2 v2.4.3 // indirect
+	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	golang.org/x/mod v0.30.0 // indirect
 	golang.org/x/net v0.47.0 // indirect
 	golang.org/x/text v0.32.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
 	google.golang.org/grpc v1.76.0 // indirect
 	google.golang.org/protobuf v1.36.10 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/klog v1.0.0 // indirect
 	sigs.k8s.io/yaml v1.6.0 // indirect
-	tags.cncf.io/container-device-interface/specs-go v1.0.1-0.20251120202831-139ffec09210 // indirect
+	tags.cncf.io/container-device-interface/specs-go v1.1.0 // indirect
 )
+
+replace go.podman.io/common => github.com/p5/containers-container-libs/common v0.0.0-20251224032800-1f0c30832b6a
+
+replace go.podman.io/storage => github.com/p5/containers-container-libs/storage v0.0.0-20251224032800-1f0c30832b6a
+
+replace go.podman.io/image/v5 => github.com/p5/containers-container-libs/image/v5 v5.0.0-20251224032800-1f0c30832b6a
diff --git a/go.sum b/go.sum
index f754fedc397..0365ec01978 100644
--- a/go.sum
+++ b/go.sum
@@ -1,11 +1,41 @@
+cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c=
+cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI=
+cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
+cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
+cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
+cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
+cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
+cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
+cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc=
+cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU=
+cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k=
+cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g=
+cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
+cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
 cyphar.com/go-pathrs v0.2.1 h1:9nx1vOgwVvX1mNBWDu93+vaceedpbsDqo+XuBGL40b8=
 cyphar.com/go-pathrs v0.2.1/go.mod h1:y8f1EMG7r+hCuFf/rXsKqMJrJAUoADZGNh5/vZPKcGc=
 dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
 dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
+github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg=
+github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA=
 github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
 github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
 github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
 github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
@@ -18,8 +48,46 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
 github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 h1:5L8Mj9Co9sJVgW3TpYk2gxGJnDjsYuboNTcRmbtGKGs=
 github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6/go.mod h1:3HgLJ9d18kXMLQlJvIY3+FszZYMxCz8WfE2MQ7hDY0w=
+github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
+github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
+github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/aws/aws-sdk-go-v2 v1.39.6 h1:2JrPCVgWJm7bm83BDwY5z8ietmeJUbh3O2ACnn+Xsqk=
+github.com/aws/aws-sdk-go-v2 v1.39.6/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
+github.com/aws/aws-sdk-go-v2/config v1.31.20 h1:/jWF4Wu90EhKCgjTdy1DGxcbcbNrjfBHvksEL79tfQc=
+github.com/aws/aws-sdk-go-v2/config v1.31.20/go.mod h1:95Hh1Tc5VYKL9NJ7tAkDcqeKt+MCXQB1hQZaRdJIZE0=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.24 h1:iJ2FmPT35EaIB0+kMa6TnQ+PwG5A1prEdAw+PsMzfHg=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.24/go.mod h1:U91+DrfjAiXPDEGYhh/x29o4p0qHX5HDqG7y5VViv64=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 h1:a+8/MLcWlIxo1lF9xaGt3J/u3yOZx+CdSveSNwjhD40=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13/go.mod h1:oGnKwIYZ4XttyU2JWxFrwvhF6YKiK/9/wmE3v3Iu9K8=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 h1:HBSI2kDkMdWz4ZM7FjwE7e/pWDEZ+nR95x8Ztet1ooY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13/go.mod h1:YE94ZoDArI7awZqJzBAZ3PDD2zSfuP7w6P2knOzIn8M=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg=
+github.com/aws/aws-sdk-go-v2/service/kms v1.48.2 h1:aL8Y/AbB6I+uw0MjLbdo68NQ8t5lNs3CY3S848HpETk=
+github.com/aws/aws-sdk-go-v2/service/kms v1.48.2/go.mod h1:VJcNH6BLr+3VJwinRKdotLOMglHO8mIKlD3ea5c7hbw=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.3 h1:NjShtS1t8r5LUfFVtFeI8xLAHQNTa7UI0VawXlrBMFQ=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.3/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7 h1:gTsnx0xXNQ6SBbymoDvcoRHL+q4l/dAFsQuKfDWSaGc=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.40.2 h1:HK5ON3KmQV2HcAunnx4sKLB9aPf3gKGwVAf7xnx0QT0=
+github.com/aws/aws-sdk-go-v2/service/sts v1.40.2/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk=
+github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
+github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
 github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -35,6 +103,8 @@ github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfa
 github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
 github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4=
 github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
 github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
 github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
 github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
@@ -49,14 +119,16 @@ github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++
 github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
 github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEmnuFjskwo=
 github.com/containernetworking/cni v1.3.0/go.mod h1:Bs8glZjjFfGPHMw6hQu82RUgEPNGEaBb9KS5KtNMnJ4=
-github.com/containernetworking/plugins v1.8.0 h1:WjGbV/0UQyo8A4qBsAh6GaDAtu1hevxVxsEuqtBqUFk=
-github.com/containernetworking/plugins v1.8.0/go.mod h1:JG3BxoJifxxHBhG3hFyxyhid7JgRVBu/wtooGEvWf1c=
+github.com/containernetworking/plugins v1.9.0 h1:Mg3SXBdRGkdXyFC4lcwr6u2ZB2SDeL6LC3U+QrEANuQ=
+github.com/containernetworking/plugins v1.9.0/go.mod h1:JG3BxoJifxxHBhG3hFyxyhid7JgRVBu/wtooGEvWf1c=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/luksy v0.0.0-20251120151536-e33b6d68eabe h1:0tA3LFemA19j82bVOJPK3n5PA818JqnhbFRsfqnIu+Y=
 github.com/containers/luksy v0.0.0-20251120151536-e33b6d68eabe/go.mod h1:SJ2DZmluHVrn7q63Lkfu8fuSFzJfs3Ig4jHrRLZlLGs=
 github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
 github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
+github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow=
+github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
 github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
 github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
@@ -66,16 +138,23 @@ github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h
 github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
 github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE=
 github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc=
+github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ=
+github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y=
 github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
 github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v29.1.1+incompatible h1:gGQk5qx62yPKRm3bUdKBzmDBSQzp17hlSLbV1F7jjys=
-github.com/docker/cli v29.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v29.1.3+incompatible h1:+kz9uDWgs+mAaIZojWfFt4d53/jv0ZUOOoSh5ZnH36c=
+github.com/docker/cli v29.1.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
@@ -92,6 +171,9 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S
 github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/fsouza/go-dockerclient v1.12.3 h1:CEsX4/msyMEekHAR9Pf8XniZBtwGo0Kl+mLPQ/AnSys=
 github.com/fsouza/go-dockerclient v1.12.3/go.mod h1:gl0t2KUfrsLbm4tw5/ySsJkkFpi7Fz9gXzY2BKLEvZA=
+github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec=
+github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
+github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
 github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
 github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
@@ -100,52 +182,160 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
 github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM=
+github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84=
+github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM=
+github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk=
+github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
+github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
+github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc=
+github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4=
+github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4=
+github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY=
+github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0=
+github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0=
+github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k=
+github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA=
+github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ=
+github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8=
+github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
+github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
+github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
+github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
+github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
+github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
+github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
+github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
+github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
+github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
+github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
+github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM=
+github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
+github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
+github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
+github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
+github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
+github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
+github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
+github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
+github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
+github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
+github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
+github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
+github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4=
+github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg=
+github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
+github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
+github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw=
+github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc=
+github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
+github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
+github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
 github.com/godbus/dbus/v5 v5.2.0 h1:3WexO+U+yg9T70v9FdHr9kCxYlazaAXUhx2VMkbfax8=
 github.com/godbus/dbus/v5 v5.2.0/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A=
+github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
-github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU=
-github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y=
+github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I=
+github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
 github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
 github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY=
 github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
+github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js=
+github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s=
 github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ=
+github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
+github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
+github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
+github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
+github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
+github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I=
+github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
+github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0=
+github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
+github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E=
+github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM=
+github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
+github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs=
+github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
+github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
+github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b h1:ZGiXF8sz7PDk6RgkP+A/SFfUD0ZR/AgG6SpRNEDKZy8=
+github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b/go.mod h1:hQmNrgofl+IY/8L+n20H6E6PWBBTokdsv+q49j0QhsU=
+github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY=
+github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4=
 github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
 github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
-github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
+github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
+github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
 github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
 github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc=
+github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8=
 github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
 github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
 github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
@@ -158,6 +348,10 @@ github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
 github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
 github.com/mistifyio/go-zfs/v4 v4.0.0 h1:sU0+5dX45tdDK5xNZ3HBi95nxUc48FS92qbIZEvpAg4=
 github.com/mistifyio/go-zfs/v4 v4.0.0/go.mod h1:weotFtXTHvBwhr9Mv96KYnDkTPBOHFUbm9cBmQpesL0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/moby/buildkit v0.26.2 h1:EIh5j0gzRsCZmQzvgNNWzSDbuKqwUIiBH7ssqLv8RU8=
 github.com/moby/buildkit v0.26.2/go.mod h1:ylDa7IqzVJgLdi/wO7H1qLREFQpmhFbw2fbn4yoTw40=
 github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
@@ -192,10 +386,14 @@ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFd
 github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
 github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
-github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
-github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
-github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
+github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A=
+github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8=
+github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
+github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM=
+github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4=
 github.com/opencontainers/cgroups v0.0.6 h1:tfZFWTIIGaUUFImTyuTg+Mr5x8XRiSdZESgEBW7UxuI=
 github.com/opencontainers/cgroups v0.0.6/go.mod h1:oWVzJsKK0gG9SCRBfTpnn16WcGEqDI8PAcpMGbqWxcs=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -212,6 +410,14 @@ github.com/opencontainers/selinux v1.13.1 h1:A8nNeceYngH9Ow++M+VVEwJVpdFmrlxsN22
 github.com/opencontainers/selinux v1.13.1/go.mod h1:S10WXZ/osk2kWOYKy1x2f/eXF5ZHJoUs8UU/2caNRbg=
 github.com/openshift/imagebuilder v1.2.19 h1:Xqq36KMJgsRU2MPaLRML23Myvk+AaY8pE8VJ6m6Vmy4=
 github.com/openshift/imagebuilder v1.2.19/go.mod h1:fdbnfQWjxMBoB/jrvEzUk+UT1zqvtZZj7oQ7GU6RD9I=
+github.com/p5/containers-container-libs/common v0.0.0-20251224032800-1f0c30832b6a h1:Z06TcE0zntWz6a3+g0BHOjBh49DVAHFhyV5yW5OpDyY=
+github.com/p5/containers-container-libs/common v0.0.0-20251224032800-1f0c30832b6a/go.mod h1:byIL3LjaUE+Pt//N2B+4QUnRHVFCwGa5LJm/ChruUwE=
+github.com/p5/containers-container-libs/image/v5 v5.0.0-20251224032800-1f0c30832b6a h1:YVGIlrwiWsTi67Ll5RRs33zOUsF4up453ldZuGoa+Pg=
+github.com/p5/containers-container-libs/image/v5 v5.0.0-20251224032800-1f0c30832b6a/go.mod h1:j5kfTAQj/suPy1ddbvFhh4OExOJ+ylJOUKmr3LQLQ0Q=
+github.com/p5/containers-container-libs/storage v0.0.0-20251224032800-1f0c30832b6a h1:QujnSaK1jaRLm90m2vKzF1QjP7pbjeITmL+hPYnBqyo=
+github.com/p5/containers-container-libs/storage v0.0.0-20251224032800-1f0c30832b6a/go.mod h1:Z4Wvuj8fy+zTePj4wzBveklhs2CnlfcBgrUMW+bFvos=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
@@ -224,8 +430,14 @@ github.com/proglottis/gpgme v0.1.6/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glE
 github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
 github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
 github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ=
 github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
+github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A=
+github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk=
+github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4=
+github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k=
 github.com/sebdah/goldie/v2 v2.7.1 h1:PkBHymaYdtvEkZV7TmyqKxdmn5/Vcj+8TpATWZjnG5E=
 github.com/sebdah/goldie/v2 v2.7.1/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
 github.com/seccomp/libseccomp-golang v0.11.1 h1:wuk4ZjSx6kyQII4rj6G6fvVzRHQaSiPvccJazDagu4g=
@@ -234,14 +446,32 @@ github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL
 github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU=
 github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
 github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
+github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
+github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
 github.com/sigstore/fulcio v1.8.1 h1:PmoQv3XmhjR2BWFWw5LcMUXJPmhyizOIL7HeYnpio58=
 github.com/sigstore/fulcio v1.8.1/go.mod h1:7tP3KW9eCGlPYRj5N4MSuUOat7CkeIHuXZ2jAUQ+Rwc=
 github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY=
 github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc=
-github.com/sigstore/sigstore v1.9.6-0.20251111174640-d8ab8afb1326 h1:s39MsSDVn8LhePV5adidcOjjKHaplLxpHM1mvbC24l4=
-github.com/sigstore/sigstore v1.9.6-0.20251111174640-d8ab8afb1326/go.mod h1:xSCb7eki7lCdi+mNh4I4MVpKPP2cWGtDYmSPPmX/K70=
-github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0=
-github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0=
+github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o=
+github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo=
+github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU=
+github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI=
+github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y=
+github.com/sigstore/sigstore-go v1.1.4 h1:wTTsgCHOfqiEzVyBYA6mDczGtBkN7cM8mPpjJj5QvMg=
+github.com/sigstore/sigstore-go v1.1.4/go.mod h1:2U/mQOT9cjjxrtIUeKDVhL+sHBKsnWddn8URlswdBsg=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4=
+github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y=
+github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I=
+github.com/sirupsen/logrus v1.9.4-0.20251023124752-b61f268f75b6 h1:JEH4GlfEqachL/BWMxNEqYZGst5AGInBdLF/bEx+dZY=
+github.com/sirupsen/logrus v1.9.4-0.20251023124752-b61f268f75b6/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
 github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU=
 github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA=
 github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
@@ -253,13 +483,30 @@ github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLy
 github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
 github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
 github.com/sylabs/sif/v2 v2.22.0 h1:Y+xXufp4RdgZe02SR3nWEg7S6q4tPWN237WHYzkDSKA=
 github.com/sylabs/sif/v2 v2.22.0/go.mod h1:W1XhWTmG1KcG7j5a3KSYdMcUIFvbs240w/MMVW627hs=
 github.com/tchap/go-patricia/v2 v2.3.3 h1:xfNEsODumaEcCcY3gI0hYPZ/PcpVv5ju6RMAhgwZDDc=
 github.com/tchap/go-patricia/v2 v2.3.3/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
+github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI=
+github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug=
+github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA=
+github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw=
+github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI=
+github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis=
+github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0=
+github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw=
+github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0=
+github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw=
+github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw=
+github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
+github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw=
+github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI=
+github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4=
+github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A=
 github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY=
 github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4=
@@ -279,10 +526,16 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms=
+github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk=
 go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo=
 go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E=
+go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss=
+go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
 go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
 go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
 go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
@@ -301,12 +554,14 @@ go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJr
 go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
 go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
 go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
-go.podman.io/common v0.66.1-0.20251202202200-df55d6c661e8 h1:X/HMawr2P5VtTICpL57wJLRpYQrLDSMOwNKKusJ4tVI=
-go.podman.io/common v0.66.1-0.20251202202200-df55d6c661e8/go.mod h1:wpnuqmlKx7El9QgMIMJiZq9SM+InftBZF4QUlEKGAZI=
-go.podman.io/image/v5 v5.38.1-0.20251202202200-df55d6c661e8 h1:JKM6NKkbcq+HpAtYlnWwugOCM08aUKE5nbI0aNIDSIQ=
-go.podman.io/image/v5 v5.38.1-0.20251202202200-df55d6c661e8/go.mod h1:bXKvkbQgtxD3hibVOhAeGANyKPqo6lVwdwOxt93+V40=
-go.podman.io/storage v1.61.1-0.20251202202200-df55d6c661e8 h1:1zf8nDepbe13RUhjdgj2UIQRHAUZlDohNZbhQBZVnm0=
-go.podman.io/storage v1.61.1-0.20251202202200-df55d6c661e8/go.mod h1:izI0U2FxSpqivwmkVbTO7ECcoER6FnM6ZL7CFPu0Tfs=
+go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA=
+go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
+go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
 go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
 go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
 go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
@@ -321,6 +576,8 @@ golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v
 golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
 golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
 golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
+golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU=
+golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
@@ -343,6 +600,8 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
 golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
+golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
+golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -363,7 +622,6 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -415,10 +673,14 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
 gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
+google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI=
+google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964=
+google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc=
+google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc=
 google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU=
 google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
 google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
 google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
 google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
@@ -426,18 +688,21 @@ google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
 gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
 k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=
 pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
 sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
 sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
-tags.cncf.io/container-device-interface v1.0.2-0.20251120202831-139ffec09210 h1:ucIvxFr8UEFjsROkGrjxb3BKqZZpfifkRT9nLgeMD9U=
-tags.cncf.io/container-device-interface v1.0.2-0.20251120202831-139ffec09210/go.mod h1:kIlIMADdgOVbyLj4ZvEtCvHXqFXqxfbVKKKgBZt8NgQ=
-tags.cncf.io/container-device-interface/specs-go v1.0.1-0.20251120202831-139ffec09210 h1:SDIHrIFfJP54QHSdPS0VfwcVYodmkp6y/OPL/ceoejs=
-tags.cncf.io/container-device-interface/specs-go v1.0.1-0.20251120202831-139ffec09210/go.mod h1:u86hoFWqnh3hWz3esofRFKbI261bUlvUfLKGrDhJkgQ=
+software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
+software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
+tags.cncf.io/container-device-interface v1.1.0 h1:RnxNhxF1JOu6CJUVpetTYvrXHdxw9j9jFYgZpI+anSY=
+tags.cncf.io/container-device-interface v1.1.0/go.mod h1:76Oj0Yqp9FwTx/pySDc8Bxjpg+VqXfDb50cKAXVJ34Q=
+tags.cncf.io/container-device-interface/specs-go v1.1.0 h1:QRZVeAceQM+zTZe12eyfuJuuzp524EKYwhmvLd+h+yQ=
+tags.cncf.io/container-device-interface/specs-go v1.1.0/go.mod h1:u86hoFWqnh3hWz3esofRFKbI261bUlvUfLKGrDhJkgQ=
diff --git a/vendor/github.com/asaskevich/govalidator/.gitignore b/vendor/github.com/asaskevich/govalidator/.gitignore
new file mode 100644
index 00000000000..8d69a9418aa
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/.gitignore
@@ -0,0 +1,15 @@
+bin/
+.idea/
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml
new file mode 100644
index 00000000000..bb83c6670df
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+dist: xenial
+go:
+  - '1.10'
+  - '1.11'
+  - '1.12'
+  - '1.13'
+  - 'tip'
+
+script:
+  - go test -coverpkg=./... -coverprofile=coverage.info -timeout=5s
+  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000000..4b462b0d81b
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md
@@ -0,0 +1,43 @@
+# Contributor Code of Conduct
+
+This project adheres to [The Code Manifesto](http://codemanifesto.com)
+as its guidelines for contributor interactions.
+
+## The Code Manifesto
+
+We want to work in an ecosystem that empowers developers to reach their
+potential — one that encourages growth and effective collaboration. A space
+that is safe for all.
+
+A space such as this benefits everyone that participates in it. It encourages
+new developers to enter our field. It is through discussion and collaboration
It is through discussion and collaboration +that we grow, and through growth that we improve. + +In the effort to create such a place, we hold to these values: + +1. **Discrimination limits us.** This includes discrimination on the basis of + race, gender, sexual orientation, gender identity, age, nationality, + technology and any other arbitrary exclusion of a group of people. +2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort + levels. Remember that, and if brought to your attention, heed it. +3. **We are our biggest assets.** None of us were born masters of our trade. + Each of us has been helped along the way. Return that favor, when and where + you can. +4. **We are resources for the future.** As an extension of #3, share what you + know. Make yourself a resource to help those that come after you. +5. **Respect defines us.** Treat others as you wish to be treated. Make your + discussions, criticisms and debates from a position of respectfulness. Ask + yourself, is it true? Is it necessary? Is it constructive? Anything less is + unacceptable. +6. **Reactions require grace.** Angry responses are valid, but abusive language + and vindictive actions are toxic. When something happens that offends you, + handle it assertively, but be respectful. Escalate reasonably, and try to + allow the offender an opportunity to explain themselves, and possibly + correct the issue. +7. **Opinions are just that: opinions.** Each and every one of us, due to our + background and upbringing, have varying opinions. That is perfectly + acceptable. Remember this: if you respect your own opinions, you should + respect the opinions of others. +8. **To err is human.** You might not intend it, but mistakes do happen and + contribute to build experience. Tolerate honest mistakes, and don't + hesitate to apologize if you make one yourself. diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md new file mode 100644 index 00000000000..7ed268a1edd --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md @@ -0,0 +1,63 @@ +#### Support +If you do have a contribution to the package, feel free to create a Pull Request or an Issue. 
+
+#### What to contribute
+If you are not sure where to start, here are some features and tasks that still need work:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spell-checking, grammar and typo fixes
+- [ ] Maintain an up-to-date list of contributors and of projects that currently use this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Keep the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) up to date
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct`, and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC`, etc.
+- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzz testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Add a batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to build what you want, but keep the following in mind when you implement new features:
+- Code must be clear and readable, and names of variables/constants must clearly describe what they hold
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Financial contributions
+
+We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
+Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" into the ledger of our open collective by the core contributors, and the person who filed the expense will be reimbursed.
+
+
+## Credits
+
+
+### Contributors
+
+Thank you to all the people who have already contributed to govalidator!
+
+
+
+### Backers
+
+Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+
+### Sponsors
+
+Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor))
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE
new file mode 100644
index 00000000000..cacba910240
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014-2020 Alex Saskevich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md
new file mode 100644
index 00000000000..2c3fc35eb64
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/README.md
@@ -0,0 +1,622 @@
+govalidator
+===========
+[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator)
+[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator)
+[![Coverage](https://codecov.io/gh/asaskevich/govalidator/branch/master/graph/badge.svg)](https://codecov.io/gh/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
+
+A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
+
+#### Installation
+Make sure that Go is installed on your computer.
+Type the following command in your terminal:
+
+	go get github.com/asaskevich/govalidator
+
+or you can pin a specific release of the package with `gopkg.in`:
+
+	go get gopkg.in/asaskevich/govalidator.v10
+
+After that, the package is ready to use.
+
+
+#### Import package in your project
+Add the following line to your `*.go` file:
+```go
+import "github.com/asaskevich/govalidator"
+```
+If you would rather not type the long `govalidator` name everywhere, you can alias the import:
+```go
+import (
+	valid "github.com/asaskevich/govalidator"
+)
+```
+
+#### Activate behavior to require all fields to have a validation tag by default
+`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
+
+`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to distinguish between `nil` and `zero value` states can use this. If disabled, both `nil` and `zero` values cause validation errors.
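+
+As a quick illustration of how these two switches interact, here is a hedged sketch (the `Profile` struct and the expected output are illustrative, not part of the package):
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/asaskevich/govalidator"
+)
+
+type Profile struct {
+	Nickname *string `valid:"required"`
+}
+
+func main() {
+	govalidator.SetFieldsRequiredByDefault(true)
+	// With the default (false), a nil Nickname would fail the `required` check.
+	govalidator.SetNilPtrAllowedByRequired(true)
+
+	ok, err := govalidator.ValidateStruct(Profile{Nickname: nil})
+	fmt.Println(ok, err) // expected to pass now: nil is treated as intentionally unset
+}
+```
+
+The minimal package-level activation is just: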
+ +```go +import "github.com/asaskevich/govalidator" + +func init() { + govalidator.SetFieldsRequiredByDefault(true) +} +``` + +Here's some code to explain it: +```go +// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): +type exampleStruct struct { + Name string `` + Email string `valid:"email"` +} + +// this, however, will only fail when Email is empty or an invalid email address: +type exampleStruct2 struct { + Name string `valid:"-"` + Email string `valid:"email"` +} + +// lastly, this will only fail when Email is an invalid email address but not when it's empty: +type exampleStruct2 struct { + Name string `valid:"-"` + Email string `valid:"email,optional"` +} +``` + +#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123)) +##### Custom validator function signature +A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible. +```go +import "github.com/asaskevich/govalidator" + +// old signature +func(i interface{}) bool + +// new signature +func(i interface{}, o interface{}) bool +``` + +##### Adding a custom validator +This was changed to prevent data races when accessing custom validators. +```go +import "github.com/asaskevich/govalidator" + +// before +govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool { + // ... +} + +// after +govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool { + // ... +}) +``` + +#### List of functions: +```go +func Abs(value float64) float64 +func BlackList(str, chars string) string +func ByteLength(str string, params ...string) bool +func CamelCaseToUnderscore(str string) string +func Contains(str, substring string) bool +func Count(array []interface{}, iterator ConditionIterator) int +func Each(array []interface{}, iterator Iterator) +func ErrorByField(e error, field string) string +func ErrorsByField(e error) map[string]string +func Filter(array []interface{}, iterator ConditionIterator) []interface{} +func Find(array []interface{}, iterator ConditionIterator) interface{} +func GetLine(s string, index int) (string, error) +func GetLines(s string) []string +func HasLowerCase(str string) bool +func HasUpperCase(str string) bool +func HasWhitespace(str string) bool +func HasWhitespaceOnly(str string) bool +func InRange(value interface{}, left interface{}, right interface{}) bool +func InRangeFloat32(value, left, right float32) bool +func InRangeFloat64(value, left, right float64) bool +func InRangeInt(value, left, right interface{}) bool +func IsASCII(str string) bool +func IsAlpha(str string) bool +func IsAlphanumeric(str string) bool +func IsBase64(str string) bool +func IsByteLength(str string, min, max int) bool +func IsCIDR(str string) bool +func IsCRC32(str string) bool +func IsCRC32b(str string) bool +func IsCreditCard(str string) bool +func IsDNSName(str string) bool +func IsDataURI(str string) bool +func IsDialString(str string) bool +func IsDivisibleBy(str, num string) bool +func IsEmail(str string) bool +func IsExistingEmail(email string) bool +func IsFilePath(str string) (bool, int) +func IsFloat(str string) bool +func IsFullWidth(str string) bool +func IsHalfWidth(str string) bool +func IsHash(str string, algorithm string) bool +func IsHexadecimal(str string) bool +func IsHexcolor(str string) bool +func IsHost(str string) bool +func IsIP(str string) bool +func IsIPv4(str string) bool +func 
IsIPv6(str string) bool +func IsISBN(str string, version int) bool +func IsISBN10(str string) bool +func IsISBN13(str string) bool +func IsISO3166Alpha2(str string) bool +func IsISO3166Alpha3(str string) bool +func IsISO4217(str string) bool +func IsISO693Alpha2(str string) bool +func IsISO693Alpha3b(str string) bool +func IsIn(str string, params ...string) bool +func IsInRaw(str string, params ...string) bool +func IsInt(str string) bool +func IsJSON(str string) bool +func IsLatitude(str string) bool +func IsLongitude(str string) bool +func IsLowerCase(str string) bool +func IsMAC(str string) bool +func IsMD4(str string) bool +func IsMD5(str string) bool +func IsMagnetURI(str string) bool +func IsMongoID(str string) bool +func IsMultibyte(str string) bool +func IsNatural(value float64) bool +func IsNegative(value float64) bool +func IsNonNegative(value float64) bool +func IsNonPositive(value float64) bool +func IsNotNull(str string) bool +func IsNull(str string) bool +func IsNumeric(str string) bool +func IsPort(str string) bool +func IsPositive(value float64) bool +func IsPrintableASCII(str string) bool +func IsRFC3339(str string) bool +func IsRFC3339WithoutZone(str string) bool +func IsRGBcolor(str string) bool +func IsRegex(str string) bool +func IsRequestURI(rawurl string) bool +func IsRequestURL(rawurl string) bool +func IsRipeMD128(str string) bool +func IsRipeMD160(str string) bool +func IsRsaPub(str string, params ...string) bool +func IsRsaPublicKey(str string, keylen int) bool +func IsSHA1(str string) bool +func IsSHA256(str string) bool +func IsSHA384(str string) bool +func IsSHA512(str string) bool +func IsSSN(str string) bool +func IsSemver(str string) bool +func IsTiger128(str string) bool +func IsTiger160(str string) bool +func IsTiger192(str string) bool +func IsTime(str string, format string) bool +func IsType(v interface{}, params ...string) bool +func IsURL(str string) bool +func IsUTFDigit(str string) bool +func IsUTFLetter(str string) bool +func IsUTFLetterNumeric(str string) bool +func IsUTFNumeric(str string) bool +func IsUUID(str string) bool +func IsUUIDv3(str string) bool +func IsUUIDv4(str string) bool +func IsUUIDv5(str string) bool +func IsULID(str string) bool +func IsUnixTime(str string) bool +func IsUpperCase(str string) bool +func IsVariableWidth(str string) bool +func IsWhole(value float64) bool +func LeftTrim(str, chars string) string +func Map(array []interface{}, iterator ResultIterator) []interface{} +func Matches(str, pattern string) bool +func MaxStringLength(str string, params ...string) bool +func MinStringLength(str string, params ...string) bool +func NormalizeEmail(str string) (string, error) +func PadBoth(str string, padStr string, padLen int) string +func PadLeft(str string, padStr string, padLen int) string +func PadRight(str string, padStr string, padLen int) string +func PrependPathToErrors(err error, path string) error +func Range(str string, params ...string) bool +func RemoveTags(s string) string +func ReplacePattern(str, pattern, replace string) string +func Reverse(s string) string +func RightTrim(str, chars string) string +func RuneLength(str string, params ...string) bool +func SafeFileName(str string) string +func SetFieldsRequiredByDefault(value bool) +func SetNilPtrAllowedByRequired(value bool) +func Sign(value float64) float64 +func StringLength(str string, params ...string) bool +func StringMatches(s string, params ...string) bool +func StripLow(str string, keepNewLines bool) string +func ToBoolean(str string) (bool, error) 
+func ToFloat(str string) (float64, error) +func ToInt(value interface{}) (res int64, err error) +func ToJSON(obj interface{}) (string, error) +func ToString(obj interface{}) string +func Trim(str, chars string) string +func Truncate(str string, length int, ending string) string +func TruncatingErrorf(str string, args ...interface{}) error +func UnderscoreToCamelCase(s string) string +func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error) +func ValidateStruct(s interface{}) (bool, error) +func WhiteList(str, chars string) string +type ConditionIterator +type CustomTypeValidator +type Error +func (e Error) Error() string +type Errors +func (es Errors) Error() string +func (es Errors) Errors() []error +type ISO3166Entry +type ISO693Entry +type InterfaceParamValidator +type Iterator +type ParamValidator +type ResultIterator +type UnsupportedTypeError +func (e *UnsupportedTypeError) Error() string +type Validator +``` + +#### Examples +###### IsURL +```go +println(govalidator.IsURL(`http://user@pass:domain.com/path/page`)) +``` +###### IsType +```go +println(govalidator.IsType("Bob", "string")) +println(govalidator.IsType(1, "int")) +i := 1 +println(govalidator.IsType(&i, "*int")) +``` + +IsType can be used through the tag `type` which is essential for map validation: +```go +type User struct { + Name string `valid:"type(string)"` + Age int `valid:"type(int)"` + Meta interface{} `valid:"type(string)"` +} +result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"}) +if err != nil { + println("error: " + err.Error()) +} +println(result) +``` +###### ToString +```go +type User struct { + FirstName string + LastName string +} + +str := govalidator.ToString(&User{"John", "Juan"}) +println(str) +``` +###### Each, Map, Filter, Count for slices +Each iterates over the slice/array and calls Iterator for every item +```go +data := []interface{}{1, 2, 3, 4, 5} +var fn govalidator.Iterator = func(value interface{}, index int) { + println(value.(int)) +} +govalidator.Each(data, fn) +``` +```go +data := []interface{}{1, 2, 3, 4, 5} +var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} { + return value.(int) * 3 +} +_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15} +``` +```go +data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} +var fn govalidator.ConditionIterator = func(value interface{}, index int) bool { + return value.(int)%2 == 0 +} +_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10} +_ = govalidator.Count(data, fn) // result = 5 +``` +###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2) +If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this: +```go +govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { + return str == "duck" +}) +``` +For completely custom validators (interface-based), see below. 
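+
+A hedged end-to-end sketch of the `duck` tag above in action (the `Comment` struct is illustrative):
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/asaskevich/govalidator"
+)
+
+type Comment struct {
+	Body string `valid:"duck"`
+}
+
+func main() {
+	// Register the custom tag before validating.
+	govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+		return str == "duck"
+	})
+
+	ok, err := govalidator.ValidateStruct(Comment{Body: "goose"})
+	fmt.Println(ok, err) // false, with an error naming the Body field
+}
+```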
+ +Here is a list of available validators for struct fields (validator - used function): +```go +"email": IsEmail, +"url": IsURL, +"dialstring": IsDialString, +"requrl": IsRequestURL, +"requri": IsRequestURI, +"alpha": IsAlpha, +"utfletter": IsUTFLetter, +"alphanum": IsAlphanumeric, +"utfletternum": IsUTFLetterNumeric, +"numeric": IsNumeric, +"utfnumeric": IsUTFNumeric, +"utfdigit": IsUTFDigit, +"hexadecimal": IsHexadecimal, +"hexcolor": IsHexcolor, +"rgbcolor": IsRGBcolor, +"lowercase": IsLowerCase, +"uppercase": IsUpperCase, +"int": IsInt, +"float": IsFloat, +"null": IsNull, +"uuid": IsUUID, +"uuidv3": IsUUIDv3, +"uuidv4": IsUUIDv4, +"uuidv5": IsUUIDv5, +"creditcard": IsCreditCard, +"isbn10": IsISBN10, +"isbn13": IsISBN13, +"json": IsJSON, +"multibyte": IsMultibyte, +"ascii": IsASCII, +"printableascii": IsPrintableASCII, +"fullwidth": IsFullWidth, +"halfwidth": IsHalfWidth, +"variablewidth": IsVariableWidth, +"base64": IsBase64, +"datauri": IsDataURI, +"ip": IsIP, +"port": IsPort, +"ipv4": IsIPv4, +"ipv6": IsIPv6, +"dns": IsDNSName, +"host": IsHost, +"mac": IsMAC, +"latitude": IsLatitude, +"longitude": IsLongitude, +"ssn": IsSSN, +"semver": IsSemver, +"rfc3339": IsRFC3339, +"rfc3339WithoutZone": IsRFC3339WithoutZone, +"ISO3166Alpha2": IsISO3166Alpha2, +"ISO3166Alpha3": IsISO3166Alpha3, +"ulid": IsULID, +``` +Validators with parameters + +```go +"range(min|max)": Range, +"length(min|max)": ByteLength, +"runelength(min|max)": RuneLength, +"stringlength(min|max)": StringLength, +"matches(pattern)": StringMatches, +"in(string1|string2|...|stringN)": IsIn, +"rsapub(keylength)" : IsRsaPub, +"minstringlength(int): MinStringLength, +"maxstringlength(int): MaxStringLength, +``` +Validators with parameters for any type + +```go +"type(type)": IsType, +``` + +And here is small example of usage: +```go +type Post struct { + Title string `valid:"alphanum,required"` + Message string `valid:"duck,ascii"` + Message2 string `valid:"animal(dog)"` + AuthorIP string `valid:"ipv4"` + Date string `valid:"-"` +} +post := &Post{ + Title: "My Example Post", + Message: "duck", + Message2: "dog", + AuthorIP: "123.234.54.3", +} + +// Add your own struct validation tags +govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { + return str == "duck" +}) + +// Add your own struct validation tags with parameter +govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool { + species := params[0] + return str == species +}) +govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$") + +result, err := govalidator.ValidateStruct(post) +if err != nil { + println("error: " + err.Error()) +} +println(result) +``` +###### ValidateMap [#2](https://github.com/asaskevich/govalidator/pull/338) +If you want to validate maps, you can use the map to be validated and a validation map that contain the same tags used in ValidateStruct, both maps have to be in the form `map[string]interface{}` + +So here is small example of usage: +```go +var mapTemplate = map[string]interface{}{ + "name":"required,alpha", + "family":"required,alpha", + "email":"required,email", + "cell-phone":"numeric", + "address":map[string]interface{}{ + "line1":"required,alphanum", + "line2":"alphanum", + "postal-code":"numeric", + }, +} + +var inputMap = map[string]interface{}{ + "name":"Bob", + "family":"Smith", + "email":"foo@bar.baz", + "address":map[string]interface{}{ + "line1":"", + "line2":"", + "postal-code":"", + }, +} + +result, err := govalidator.ValidateMap(inputMap, 
mapTemplate) +if err != nil { + println("error: " + err.Error()) +} +println(result) +``` + +###### WhiteList +```go +// Remove all characters from string ignoring characters between "a" and "z" +println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa") +``` + +###### Custom validation functions +Custom validation using your own domain specific validators is also available - here's an example of how to use it: +```go +import "github.com/asaskevich/govalidator" + +type CustomByteArray [6]byte // custom types are supported and can be validated + +type StructWithCustomByteArray struct { + ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence + Email string `valid:"email"` + CustomMinLength int `valid:"-"` +} + +govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool { + switch v := context.(type) { // you can type switch on the context interface being validated + case StructWithCustomByteArray: + // you can check and validate against some other field in the context, + // return early or not validate against the context at all – your choice + case SomeOtherType: + // ... + default: + // expecting some other type? Throw/panic here or continue + } + + switch v := i.(type) { // type switch on the struct field being validated + case CustomByteArray: + for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes + if e != 0 { + return true + } + } + } + return false +}) +govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool { + switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation + case StructWithCustomByteArray: + return len(v.ID) >= v.CustomMinLength + } + return false +}) +``` + +###### Loop over Error() +By default .Error() returns all errors in a single String. To access each error you can do this: +```go + if err != nil { + errs := err.(govalidator.Errors).Errors() + for _, e := range errs { + fmt.Println(e.Error()) + } + } +``` + +###### Custom error messages +Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it: +```go +type Ticket struct { + Id int64 `json:"id"` + FirstName string `json:"firstname" valid:"required~First name is blank"` +} +``` + +#### Notes +Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator). +Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator). + +#### Support +If you do have a contribution to the package, feel free to create a Pull Request or an Issue. 
+
+#### What to contribute
+If you are not sure where to start, here are some features and tasks that still need work:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spell-checking, grammar and typo fixes
+- [ ] Maintain an up-to-date list of contributors and of projects that currently use this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Keep the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) up to date
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct`, and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC`, etc.
+- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzz testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Add a batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to build what you want, but keep the following in mind when you implement new features:
+- Code must be clear and readable, and names of variables/constants must clearly describe what they hold
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Credits
+### Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
+* [Daniel Lohse](https://github.com/annismckenzie)
+* [Attila Oláh](https://github.com/attilaolah)
+* [Daniel Korner](https://github.com/Dadie)
+* [Steven Wilkin](https://github.com/stevenwilkin)
+* [Deiwin Sarjas](https://github.com/deiwin)
+* [Noah Shibley](https://github.com/slugmobile)
+* [Nathan Davies](https://github.com/nathj07)
+* [Matt Sanford](https://github.com/mzsanford)
+* [Simon ccl1115](https://github.com/ccl1115)
+
+
+
+### Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+
+### Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website.
[[Become a sponsor](https://opencollective.com/govalidator#sponsor)]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## License
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go
new file mode 100644
index 00000000000..3e1da7cb480
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/arrays.go
@@ -0,0 +1,87 @@
+package govalidator
+
+// Iterator is a function that accepts an element of a slice/array and its index
+type Iterator func(interface{}, int)
+
+// ResultIterator is a function that accepts an element of a slice/array and its index, and returns a result
+type ResultIterator func(interface{}, int) interface{}
+
+// ConditionIterator is a function that accepts an element of a slice/array and its index, and returns a boolean
+type ConditionIterator func(interface{}, int) bool
+
+// ReduceIterator is a function that accepts two elements of a slice/array and returns the result of merging them
+type ReduceIterator func(interface{}, interface{}) interface{}
+
+// Some returns true if at least one item of the array satisfies ConditionIterator.
+func Some(array []interface{}, iterator ConditionIterator) bool {
+	res := false
+	for index, data := range array {
+		res = res || iterator(data, index)
+	}
+	return res
+}
+
+// Every returns true if every item of the array satisfies ConditionIterator.
+func Every(array []interface{}, iterator ConditionIterator) bool {
+	res := true
+	for index, data := range array {
+		res = res && iterator(data, index)
+	}
+	return res
+}
+
+// Reduce boils down a list of values into a single value via ReduceIterator, starting from initialValue
+func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} {
+	for _, data := range array {
+		initialValue = iterator(initialValue, data)
+	}
+	return initialValue
+}
+
+// Each iterates over the slice and applies Iterator to every item
+func Each(array []interface{}, iterator Iterator) {
+	for index, data := range array {
+		iterator(data, index)
+	}
+}
+
+// Map iterates over the slice, applies ResultIterator to every item and returns the resulting new slice.
+func Map(array []interface{}, iterator ResultIterator) []interface{} {
+	var result = make([]interface{}, len(array))
+	for index, data := range array {
+		result[index] = iterator(data, index)
+	}
+	return result
+}
+
+// Find iterates over the slice and applies ConditionIterator to every item. It returns the first item that meets ConditionIterator, or nil otherwise.
+func Find(array []interface{}, iterator ConditionIterator) interface{} {
+	for index, data := range array {
+		if iterator(data, index) {
+			return data
+		}
+	}
+	return nil
+}
+
+// Filter iterates over the slice and applies ConditionIterator to every item. It returns a new slice of the items that meet it.
+func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
+	var result = make([]interface{}, 0)
+	for index, data := range array {
+		if iterator(data, index) {
+			result = append(result, data)
+		}
+	}
+	return result
+}
+
+// Count iterates over the slice and applies ConditionIterator to every item. It returns the count of items that meet it.
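+// For example, Count([]interface{}{1, 2, 3, 4}, isEven) returns 2 when isEven reports whether the item is an even int (illustrative helper, not part of the package).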
+func Count(array []interface{}, iterator ConditionIterator) int {
+	count := 0
+	for index, data := range array {
+		if iterator(data, index) {
+			count = count + 1
+		}
+	}
+	return count
+}
diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go
new file mode 100644
index 00000000000..d68e990fc25
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/converter.go
@@ -0,0 +1,81 @@
+package govalidator
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+// ToString converts the input to a string.
+func ToString(obj interface{}) string {
+	res := fmt.Sprintf("%v", obj)
+	return res
+}
+
+// ToJSON converts the input to a valid JSON string
+func ToJSON(obj interface{}) (string, error) {
+	res, err := json.Marshal(obj)
+	if err != nil {
+		res = []byte("")
+	}
+	return string(res), err
+}
+
+// ToFloat converts the input to a float64, or 0.0 and an error if the input is not numeric.
+func ToFloat(value interface{}) (res float64, err error) {
+	val := reflect.ValueOf(value)
+
+	switch value.(type) {
+	case int, int8, int16, int32, int64:
+		res = float64(val.Int())
+	case uint, uint8, uint16, uint32, uint64:
+		res = float64(val.Uint())
+	case float32, float64:
+		res = val.Float()
+	case string:
+		res, err = strconv.ParseFloat(val.String(), 64)
+		if err != nil {
+			res = 0
+		}
+	default:
+		err = fmt.Errorf("ToFloat: unknown interface type %T", value)
+		res = 0
+	}
+
+	return
+}
+
+// ToInt converts the input string or any numeric type to an int64, or 0 and an error if the input is not an integer.
+func ToInt(value interface{}) (res int64, err error) {
+	val := reflect.ValueOf(value)
+
+	switch value.(type) {
+	case int, int8, int16, int32, int64:
+		res = val.Int()
+	case uint, uint8, uint16, uint32, uint64:
+		res = int64(val.Uint())
+	case float32, float64:
+		res = int64(val.Float())
+	case string:
+		if IsInt(val.String()) {
+			res, err = strconv.ParseInt(val.String(), 0, 64)
+			if err != nil {
+				res = 0
+			}
+		} else {
+			err = fmt.Errorf("ToInt: invalid numeric format %v", value)
+			res = 0
+		}
+	default:
+		err = fmt.Errorf("ToInt: unknown interface type %T", value)
+		res = 0
+	}
+
+	return
+}
+
+// ToBoolean converts the input string to a boolean.
+func ToBoolean(str string) (bool, error) {
+	return strconv.ParseBool(str)
+}
diff --git a/vendor/github.com/asaskevich/govalidator/doc.go b/vendor/github.com/asaskevich/govalidator/doc.go
new file mode 100644
index 00000000000..55dce62dc8c
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/doc.go
@@ -0,0 +1,3 @@
+package govalidator
+
+// A package of validators and sanitizers for strings, structures and collections.
diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go
new file mode 100644
index 00000000000..1da2336f47e
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/error.go
@@ -0,0 +1,47 @@
+package govalidator
+
+import (
+	"sort"
+	"strings"
+)
+
+// Errors is an array of multiple errors and conforms to the error interface.
+type Errors []error
+
+// Errors returns itself.
+func (es Errors) Errors() []error {
+	return es
+}
+
+func (es Errors) Error() string {
+	var errs []string
+	for _, e := range es {
+		errs = append(errs, e.Error())
+	}
+	sort.Strings(errs)
+	return strings.Join(errs, ";")
+}
+
+// Error encapsulates a name, an error and whether there's a custom error message or not.
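+// Name holds the field name and Err the underlying error; Path records the chain of parent struct fields for nested validation errors.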
+type Error struct {
+	Name                     string
+	Err                      error
+	CustomErrorMessageExists bool
+
+	// Validator indicates the name of the validator that failed
+	Validator string
+	Path      []string
+}
+
+func (e Error) Error() string {
+	if e.CustomErrorMessageExists {
+		return e.Err.Error()
+	}
+
+	errName := e.Name
+	if len(e.Path) > 0 {
+		errName = strings.Join(append(e.Path, e.Name), ".")
+	}
+
+	return errName + ": " + e.Err.Error()
+}
diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go
new file mode 100644
index 00000000000..5041d9e8684
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/numerics.go
@@ -0,0 +1,100 @@
+package govalidator
+
+import (
+	"math"
+)
+
+// Abs returns the absolute value of the number
+func Abs(value float64) float64 {
+	return math.Abs(value)
+}
+
+// Sign returns the signum of the number: 1 if value > 0, -1 if value < 0, 0 otherwise
+func Sign(value float64) float64 {
+	if value > 0 {
+		return 1
+	} else if value < 0 {
+		return -1
+	} else {
+		return 0
+	}
+}
+
+// IsNegative returns true if value < 0
+func IsNegative(value float64) bool {
+	return value < 0
+}
+
+// IsPositive returns true if value > 0
+func IsPositive(value float64) bool {
+	return value > 0
+}
+
+// IsNonNegative returns true if value >= 0
+func IsNonNegative(value float64) bool {
+	return value >= 0
+}
+
+// IsNonPositive returns true if value <= 0
+func IsNonPositive(value float64) bool {
+	return value <= 0
+}
+
+// InRangeInt returns true if value lies between the left and right borders
+func InRangeInt(value, left, right interface{}) bool {
+	value64, _ := ToInt(value)
+	left64, _ := ToInt(left)
+	right64, _ := ToInt(right)
+	if left64 > right64 {
+		left64, right64 = right64, left64
+	}
+	return value64 >= left64 && value64 <= right64
+}
+
+// InRangeFloat32 returns true if value lies between the left and right borders
+func InRangeFloat32(value, left, right float32) bool {
+	if left > right {
+		left, right = right, left
+	}
+	return value >= left && value <= right
+}
+
+// InRangeFloat64 returns true if value lies between the left and right borders
+func InRangeFloat64(value, left, right float64) bool {
+	if left > right {
+		left, right = right, left
+	}
+	return value >= left && value <= right
+}
+
+// InRange returns true if value lies between the left and right borders; it is generic over int, float32, float64 and string.
+// All types must be the same type.
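+// Strings are compared lexicographically, byte-wise.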
+// False if value doesn't lie in range or if it incompatible or not comparable +func InRange(value interface{}, left interface{}, right interface{}) bool { + switch value.(type) { + case int: + intValue, _ := ToInt(value) + intLeft, _ := ToInt(left) + intRight, _ := ToInt(right) + return InRangeInt(intValue, intLeft, intRight) + case float32, float64: + intValue, _ := ToFloat(value) + intLeft, _ := ToFloat(left) + intRight, _ := ToFloat(right) + return InRangeFloat64(intValue, intLeft, intRight) + case string: + return value.(string) >= left.(string) && value.(string) <= right.(string) + default: + return false + } +} + +// IsWhole returns true if value is whole number +func IsWhole(value float64) bool { + return math.Remainder(value, 1) == 0 +} + +// IsNatural returns true if value is natural number (positive and whole) +func IsNatural(value float64) bool { + return IsWhole(value) && IsPositive(value) +} diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go new file mode 100644 index 00000000000..bafc3765ea1 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/patterns.go @@ -0,0 +1,113 @@ +package govalidator + +import "regexp" + +// Basic regular expressions for validating strings +const ( + Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" + CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" + ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" + ISBN13 string = "^(?:[0-9]{13})$" + UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" + UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + Alpha string = "^[a-zA-Z]+$" + Alphanumeric string = "^[a-zA-Z0-9]+$" + Numeric string = "^[0-9]+$" + Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" + Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" + Hexadecimal string = "^[0-9a-fA-F]+$" + Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + RGBcolor string = 
"^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" + ASCII string = "^[\x00-\x7F]+$" + Multibyte string = "[^\x00-\x7F]" + FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + PrintableASCII string = "^[\x20-\x7E]+$" + DataURI string = "^data:.+\\/(.+);base64$" + MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$" + Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` + IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` + URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` + URLUsername string = `(\S+(:\S*)?@)` + URLPath string = `((\/|\?|#)[^\s]*)` + URLPort string = `(:(\d{1,5}))` + URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))` + URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` + URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` + SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` + WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` + UnixPath string = `^(/[^/\x00]*)+/?$` + WinARPath string = `^(?:(?:[a-zA-Z]:|\\\\[a-z0-9_.$●-]+\\[a-z0-9_.$●-]+)\\|\\?[^\\/:*?"<>|\r\n]+\\?)(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` + UnixARPath string = `^((\.{0,2}/)?([^/\x00]*))+/?$` + Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" + tagName string = "valid" + hasLowerCase string = ".*[[:lower:]]" + hasUpperCase string = ".*[[:upper:]]" + hasWhitespace string = ".*[[:space:]]" + hasWhitespaceOnly string = "^[[:space:]]+$" + IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$" + IMSI string = "^\\d{14,15}$" + E164 string = `^\+?[1-9]\d{1,14}$` +) + +// Used by IsFilePath func +const ( + // Unknown is unresolved OS type + Unknown = iota + // Win is Windows type + Win + // Unix is *nix OS types + Unix +) + +var ( + userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") + hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") + userDotRegexp = 
regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") + rxEmail = regexp.MustCompile(Email) + rxCreditCard = regexp.MustCompile(CreditCard) + rxISBN10 = regexp.MustCompile(ISBN10) + rxISBN13 = regexp.MustCompile(ISBN13) + rxUUID3 = regexp.MustCompile(UUID3) + rxUUID4 = regexp.MustCompile(UUID4) + rxUUID5 = regexp.MustCompile(UUID5) + rxUUID = regexp.MustCompile(UUID) + rxAlpha = regexp.MustCompile(Alpha) + rxAlphanumeric = regexp.MustCompile(Alphanumeric) + rxNumeric = regexp.MustCompile(Numeric) + rxInt = regexp.MustCompile(Int) + rxFloat = regexp.MustCompile(Float) + rxHexadecimal = regexp.MustCompile(Hexadecimal) + rxHexcolor = regexp.MustCompile(Hexcolor) + rxRGBcolor = regexp.MustCompile(RGBcolor) + rxASCII = regexp.MustCompile(ASCII) + rxPrintableASCII = regexp.MustCompile(PrintableASCII) + rxMultibyte = regexp.MustCompile(Multibyte) + rxFullWidth = regexp.MustCompile(FullWidth) + rxHalfWidth = regexp.MustCompile(HalfWidth) + rxBase64 = regexp.MustCompile(Base64) + rxDataURI = regexp.MustCompile(DataURI) + rxMagnetURI = regexp.MustCompile(MagnetURI) + rxLatitude = regexp.MustCompile(Latitude) + rxLongitude = regexp.MustCompile(Longitude) + rxDNSName = regexp.MustCompile(DNSName) + rxURL = regexp.MustCompile(URL) + rxSSN = regexp.MustCompile(SSN) + rxWinPath = regexp.MustCompile(WinPath) + rxUnixPath = regexp.MustCompile(UnixPath) + rxARWinPath = regexp.MustCompile(WinARPath) + rxARUnixPath = regexp.MustCompile(UnixARPath) + rxSemver = regexp.MustCompile(Semver) + rxHasLowerCase = regexp.MustCompile(hasLowerCase) + rxHasUpperCase = regexp.MustCompile(hasUpperCase) + rxHasWhitespace = regexp.MustCompile(hasWhitespace) + rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) + rxIMEI = regexp.MustCompile(IMEI) + rxIMSI = regexp.MustCompile(IMSI) + rxE164 = regexp.MustCompile(E164) +) diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go new file mode 100644 index 00000000000..c573abb51af --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/types.go @@ -0,0 +1,656 @@ +package govalidator + +import ( + "reflect" + "regexp" + "sort" + "sync" +) + +// Validator is a wrapper for a validator function that returns bool and accepts string. +type Validator func(str string) bool + +// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type. +// The second parameter should be the context (in the case of validating a struct: the whole object being validated). +type CustomTypeValidator func(i interface{}, o interface{}) bool + +// ParamValidator is a wrapper for validator functions that accept additional parameters. +type ParamValidator func(str string, params ...string) bool + +// InterfaceParamValidator is a wrapper for functions that accept variants parameters for an interface value +type InterfaceParamValidator func(in interface{}, params ...string) bool +type tagOptionsMap map[string]tagOption + +func (t tagOptionsMap) orderedKeys() []string { + var keys []string + for k := range t { + keys = append(keys, k) + } + + sort.Slice(keys, func(a, b int) bool { + return t[keys[a]].order < t[keys[b]].order + }) + + return keys +} + +type tagOption struct { + name string + customErrorMessage string + order int +} + +// UnsupportedTypeError is a wrapper for reflect.Type +type UnsupportedTypeError struct { + Type reflect.Type +} + +// stringValues is a slice of reflect.Value holding *reflect.StringValue. +// It implements the methods to sort by string. 
+type stringValues []reflect.Value
+
+// InterfaceParamTagMap is a map of functions that accept variadic parameters for an interface value
+var InterfaceParamTagMap = map[string]InterfaceParamValidator{
+	"type": IsType,
+}
+
+// InterfaceParamTagRegexMap maps interface param tags to their respective regexes.
+var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{
+	"type": regexp.MustCompile(`^type\((.*)\)$`),
+}
+
+// ParamTagMap is a map of validator functions that accept additional parameters
+var ParamTagMap = map[string]ParamValidator{
+	"length":          ByteLength,
+	"range":           Range,
+	"runelength":      RuneLength,
+	"stringlength":    StringLength,
+	"matches":         StringMatches,
+	"in":              IsInRaw,
+	"rsapub":          IsRsaPub,
+	"minstringlength": MinStringLength,
+	"maxstringlength": MaxStringLength,
+}
+
+// ParamTagRegexMap maps param tags to their respective regexes.
+var ParamTagRegexMap = map[string]*regexp.Regexp{
+	"range":           regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
+	"length":          regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
+	"runelength":      regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
+	"stringlength":    regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
+	"in":              regexp.MustCompile(`^in\((.*)\)`),
+	"matches":         regexp.MustCompile(`^matches\((.+)\)$`),
+	"rsapub":          regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
+	"minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"),
+	"maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"),
+}
+
+type customTypeTagMap struct {
+	validators map[string]CustomTypeValidator
+
+	sync.RWMutex
+}
+
+func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
+	tm.RLock()
+	defer tm.RUnlock()
+	v, ok := tm.validators[name]
+	return v, ok
+}
+
+func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
+	tm.Lock()
+	defer tm.Unlock()
+	tm.validators[name] = ctv
+}
+
+// CustomTypeTagMap is a map of functions that can be used as tags for the ValidateStruct function.
+// Use this to validate compound or custom types that need to be handled as a whole, e.g.
+// `type UUID [16]byte` (this would be handled as an array of bytes).
+var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
+
+// TagMap is a map of functions that can be used as tags for the ValidateStruct function.
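+// Keys are the tag names usable inside `valid:"..."` struct tags; each maps to its validator.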
+var TagMap = map[string]Validator{ + "email": IsEmail, + "url": IsURL, + "dialstring": IsDialString, + "requrl": IsRequestURL, + "requri": IsRequestURI, + "alpha": IsAlpha, + "utfletter": IsUTFLetter, + "alphanum": IsAlphanumeric, + "utfletternum": IsUTFLetterNumeric, + "numeric": IsNumeric, + "utfnumeric": IsUTFNumeric, + "utfdigit": IsUTFDigit, + "hexadecimal": IsHexadecimal, + "hexcolor": IsHexcolor, + "rgbcolor": IsRGBcolor, + "lowercase": IsLowerCase, + "uppercase": IsUpperCase, + "int": IsInt, + "float": IsFloat, + "null": IsNull, + "notnull": IsNotNull, + "uuid": IsUUID, + "uuidv3": IsUUIDv3, + "uuidv4": IsUUIDv4, + "uuidv5": IsUUIDv5, + "creditcard": IsCreditCard, + "isbn10": IsISBN10, + "isbn13": IsISBN13, + "json": IsJSON, + "multibyte": IsMultibyte, + "ascii": IsASCII, + "printableascii": IsPrintableASCII, + "fullwidth": IsFullWidth, + "halfwidth": IsHalfWidth, + "variablewidth": IsVariableWidth, + "base64": IsBase64, + "datauri": IsDataURI, + "ip": IsIP, + "port": IsPort, + "ipv4": IsIPv4, + "ipv6": IsIPv6, + "dns": IsDNSName, + "host": IsHost, + "mac": IsMAC, + "latitude": IsLatitude, + "longitude": IsLongitude, + "ssn": IsSSN, + "semver": IsSemver, + "rfc3339": IsRFC3339, + "rfc3339WithoutZone": IsRFC3339WithoutZone, + "ISO3166Alpha2": IsISO3166Alpha2, + "ISO3166Alpha3": IsISO3166Alpha3, + "ISO4217": IsISO4217, + "IMEI": IsIMEI, + "ulid": IsULID, +} + +// ISO3166Entry stores country codes +type ISO3166Entry struct { + EnglishShortName string + FrenchShortName string + Alpha2Code string + Alpha3Code string + Numeric string +} + +//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes" +var ISO3166List = []ISO3166Entry{ + {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"}, + {"Albania", "Albanie (l')", "AL", "ALB", "008"}, + {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"}, + {"Algeria", "Algérie (l')", "DZ", "DZA", "012"}, + {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"}, + {"Andorra", "Andorre (l')", "AD", "AND", "020"}, + {"Angola", "Angola (l')", "AO", "AGO", "024"}, + {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"}, + {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"}, + {"Argentina", "Argentine (l')", "AR", "ARG", "032"}, + {"Australia", "Australie (l')", "AU", "AUS", "036"}, + {"Austria", "Autriche (l')", "AT", "AUT", "040"}, + {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"}, + {"Bahrain", "Bahreïn", "BH", "BHR", "048"}, + {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"}, + {"Armenia", "Arménie (l')", "AM", "ARM", "051"}, + {"Barbados", "Barbade (la)", "BB", "BRB", "052"}, + {"Belgium", "Belgique (la)", "BE", "BEL", "056"}, + {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"}, + {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"}, + {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"}, + {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"}, + {"Botswana", "Botswana (le)", "BW", "BWA", "072"}, + {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"}, + {"Brazil", "Brésil (le)", "BR", "BRA", "076"}, + {"Belize", "Belize (le)", "BZ", "BLZ", "084"}, + {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"}, + {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"}, + {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"}, + {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"}, + {"Bulgaria", "Bulgarie (la)", "BG", 
"BGR", "100"}, + {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"}, + {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, + {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, + {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, + {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"}, + {"Canada", "Canada (le)", "CA", "CAN", "124"}, + {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, + {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"}, + {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, + {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, + {"Chad", "Tchad (le)", "TD", "TCD", "148"}, + {"Chile", "Chili (le)", "CL", "CHL", "152"}, + {"China", "Chine (la)", "CN", "CHN", "156"}, + {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, + {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"}, + {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"}, + {"Colombia", "Colombie (la)", "CO", "COL", "170"}, + {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, + {"Mayotte", "Mayotte", "YT", "MYT", "175"}, + {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, + {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, + {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"}, + {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, + {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, + {"Cuba", "Cuba", "CU", "CUB", "192"}, + {"Cyprus", "Chypre", "CY", "CYP", "196"}, + {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, + {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, + {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, + {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, + {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, + {"Ecuador", "Équateur (l')", "EC", "ECU", "218"}, + {"El Salvador", "El Salvador", "SV", "SLV", "222"}, + {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, + {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, + {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, + {"Estonia", "Estonie (l')", "EE", "EST", "233"}, + {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"}, + {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"}, + {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"}, + {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, + {"Finland", "Finlande (la)", "FI", "FIN", "246"}, + {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"}, + {"France", "France (la)", "FR", "FRA", "250"}, + {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"}, + {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, + {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, + {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, + {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, + {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, + {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, + {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, + {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, + {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, + {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, + {"Kiribati", "Kiribati", "KI", "KIR", "296"}, + {"Greece", "Grèce (la)", "GR", "GRC", "300"}, + {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, + {"Grenada", "Grenade (la)", "GD", "GRD", "308"}, 
+ {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"}, + {"Guam", "Guam", "GU", "GUM", "316"}, + {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, + {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, + {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, + {"Haiti", "Haïti", "HT", "HTI", "332"}, + {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"}, + {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, + {"Honduras", "Honduras (le)", "HN", "HND", "340"}, + {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, + {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, + {"Iceland", "Islande (l')", "IS", "ISL", "352"}, + {"India", "Inde (l')", "IN", "IND", "356"}, + {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, + {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, + {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, + {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, + {"Israel", "Israël", "IL", "ISR", "376"}, + {"Italy", "Italie (l')", "IT", "ITA", "380"}, + {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, + {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, + {"Japan", "Japon (le)", "JP", "JPN", "392"}, + {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"}, + {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, + {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, + {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, + {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, + {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, + {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, + {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, + {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, + {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, + {"Latvia", "Lettonie (la)", "LV", "LVA", "428"}, + {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, + {"Libya", "Libye (la)", "LY", "LBY", "434"}, + {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, + {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, + {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, + {"Macao", "Macao", "MO", "MAC", "446"}, + {"Madagascar", "Madagascar", "MG", "MDG", "450"}, + {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, + {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, + {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, + {"Mali", "Mali (le)", "ML", "MLI", "466"}, + {"Malta", "Malte", "MT", "MLT", "470"}, + {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, + {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, + {"Mauritius", "Maurice", "MU", "MUS", "480"}, + {"Mexico", "Mexique (le)", "MX", "MEX", "484"}, + {"Monaco", "Monaco", "MC", "MCO", "492"}, + {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, + {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, + {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, + {"Montserrat", "Montserrat", "MS", "MSR", "500"}, + {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, + {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, + {"Oman", "Oman", "OM", "OMN", "512"}, + {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, + {"Nauru", "Nauru", "NR", "NRU", "520"}, + {"Nepal", "Népal (le)", "NP", "NPL", "524"}, + {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, + {"Curaçao", "Curaçao", "CW", "CUW", "531"}, + {"Aruba", "Aruba", "AW", "ABW", "533"}, + {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"}, + 
{"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"}, + {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, + {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, + {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, + {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, + {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, + {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, + {"Niue", "Niue", "NU", "NIU", "570"}, + {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"}, + {"Norway", "Norvège (la)", "NO", "NOR", "578"}, + {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"}, + {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, + {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, + {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"}, + {"Palau", "Palaos (les)", "PW", "PLW", "585"}, + {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, + {"Panama", "Panama (le)", "PA", "PAN", "591"}, + {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, + {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, + {"Peru", "Pérou (le)", "PE", "PER", "604"}, + {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, + {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, + {"Poland", "Pologne (la)", "PL", "POL", "616"}, + {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, + {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, + {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, + {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, + {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, + {"Réunion", "Réunion (La)", "RE", "REU", "638"}, + {"Romania", "Roumanie (la)", "RO", "ROU", "642"}, + {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"}, + {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, + {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"}, + {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"}, + {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, + {"Anguilla", "Anguilla", "AI", "AIA", "660"}, + {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"}, + {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, + {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, + {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, + {"San Marino", "Saint-Marin", "SM", "SMR", "674"}, + {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"}, + {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"}, + {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, + {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, + {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, + {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"}, + {"Singapore", "Singapour", "SG", "SGP", "702"}, + {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, + {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"}, + {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, + {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, + {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, + {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, + {"Spain", "Espagne (l')", "ES", "ESP", "724"}, + {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, + {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"}, + {"Western Sahara*", 
"Sahara occidental (le)*", "EH", "ESH", "732"}, + {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, + {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"}, + {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, + {"Sweden", "Suède (la)", "SE", "SWE", "752"}, + {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, + {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, + {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"}, + {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, + {"Togo", "Togo (le)", "TG", "TGO", "768"}, + {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, + {"Tonga", "Tonga (les)", "TO", "TON", "776"}, + {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, + {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, + {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, + {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, + {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, + {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"}, + {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, + {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, + {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, + {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, + {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, + {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, + {"Guernsey", "Guernesey", "GG", "GGY", "831"}, + {"Jersey", "Jersey", "JE", "JEY", "832"}, + {"Isle of Man", "Île de Man", "IM", "IMN", "833"}, + {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, + {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, + {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"}, + {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, + {"Uruguay", "Uruguay (l')", "UY", "URY", "858"}, + {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, + {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, + {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, + {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, + {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, + {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, +} + +// ISO4217List is the list of ISO currency codes +var ISO4217List = []string{ + "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", + "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", + "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", + "DJF", "DKK", "DOP", "DZD", + "EGP", "ERN", "ETB", "EUR", + "FJD", "FKP", + "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", + "HKD", "HNL", "HRK", "HTG", "HUF", + "IDR", "ILS", "INR", "IQD", "IRR", "ISK", + "JMD", "JOD", "JPY", + "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", + "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", + "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", + "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", + "OMR", + "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", + "QAR", + "RON", "RSD", "RUB", "RWF", + "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL", + "THB", "TJS", "TMT", 
"TND", "TOP", "TRY", "TTD", "TWD", "TZS", + "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS", + "VEF", "VES", "VND", "VUV", + "WST", + "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", + "YER", + "ZAR", "ZMW", "ZWL", +} + +// ISO693Entry stores ISO language codes +type ISO693Entry struct { + Alpha3bCode string + Alpha2Code string + English string +} + +//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json +var ISO693List = []ISO693Entry{ + {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, + {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, + {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, + {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, + {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, + {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, + {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, + {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, + {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, + {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, + {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, + {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, + {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, + {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, + {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, + {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, + {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, + {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, + {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, + {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, + {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, + {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, + {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, + {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, + {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"}, + {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, + {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, + {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, + {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, + {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, + {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, + {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, + {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, + {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, + {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, + {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, + {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, + {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, + {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, + {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, + {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, + {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, + {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, + {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, + {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, + {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, + {Alpha3bCode: "fre", 
Alpha2Code: "fr", English: "French"}, + {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"}, + {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, + {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, + {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, + {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, + {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, + {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, + {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, + {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, + {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, + {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, + {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, + {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, + {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, + {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, + {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, + {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, + {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, + {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, + {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, + {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, + {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, + {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, + {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, + {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, + {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, + {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, + {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, + {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, + {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, + {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, + {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, + {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, + {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, + {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, + {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, + {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, + {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, + {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, + {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, + {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, + {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, + {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, + {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, + {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, + {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, + {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, + {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, + {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, + {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, + {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, + {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, + {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, + 
{Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, + {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"}, + {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, + {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, + {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, + {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, + {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, + {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, + {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, + {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, + {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, + {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, + {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, + {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, + {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, + {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, + {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, + {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"}, + {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, + {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, + {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, + {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, + {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, + {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, + {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, + {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, + {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, + {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, + {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, + {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, + {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, + {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, + {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, + {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, + {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, + {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, + {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, + {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, + {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, + {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, + {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, + {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, + {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, + {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, + {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, + {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, + {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, + {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, + {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, + {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, + {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, + {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, + {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, + {Alpha3bCode: "swe", Alpha2Code: "sv", 
English: "Swedish"}, + {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, + {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, + {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, + {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, + {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, + {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, + {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, + {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, + {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, + {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, + {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, + {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, + {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, + {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, + {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, + {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, + {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, + {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, + {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, + {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, + {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, + {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, + {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, + {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, + {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, + {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, + {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, + {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, + {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, + {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, +} diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go new file mode 100644 index 00000000000..f4c30f824a2 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/utils.go @@ -0,0 +1,270 @@ +package govalidator + +import ( + "errors" + "fmt" + "html" + "math" + "path" + "regexp" + "strings" + "unicode" + "unicode/utf8" +) + +// Contains checks if the string contains the substring. +func Contains(str, substring string) bool { + return strings.Contains(str, substring) +} + +// Matches checks if string matches the pattern (pattern is regular expression) +// In case of error return false +func Matches(str, pattern string) bool { + match, _ := regexp.MatchString(pattern, str) + return match +} + +// LeftTrim trims characters from the left side of the input. +// If second argument is empty, it will remove leading spaces. +func LeftTrim(str, chars string) string { + if chars == "" { + return strings.TrimLeftFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("^[" + chars + "]+") + return r.ReplaceAllString(str, "") +} + +// RightTrim trims characters from the right side of the input. +// If second argument is empty, it will remove trailing spaces. +func RightTrim(str, chars string) string { + if chars == "" { + return strings.TrimRightFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("[" + chars + "]+$") + return r.ReplaceAllString(str, "") +} + +// Trim trims characters from both sides of the input. +// If second argument is empty, it will remove spaces. 
+func Trim(str, chars string) string { + return LeftTrim(RightTrim(str, chars), chars) +} + +// WhiteList removes characters that do not appear in the whitelist. +func WhiteList(str, chars string) string { + pattern := "[^" + chars + "]+" + r, _ := regexp.Compile(pattern) + return r.ReplaceAllString(str, "") +} + +// BlackList removes characters that appear in the blacklist. +func BlackList(str, chars string) string { + pattern := "[" + chars + "]+" + r, _ := regexp.Compile(pattern) + return r.ReplaceAllString(str, "") +} + +// StripLow removes characters with a numerical value below 32, plus the DEL character (127); these are mostly control characters. +// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD). +func StripLow(str string, keepNewLines bool) string { + chars := "" + if keepNewLines { + chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F" + } else { + chars = "\x00-\x1F\x7F" + } + return BlackList(str, chars) +} + +// ReplacePattern replaces a regular expression pattern in a string +func ReplacePattern(str, pattern, replace string) string { + r, _ := regexp.Compile(pattern) + return r.ReplaceAllString(str, replace) +} + +// Escape replaces <, >, & and " with HTML entities. +var Escape = html.EscapeString + +func addSegment(inrune, segment []rune) []rune { + if len(segment) == 0 { + return inrune + } + if len(inrune) != 0 { + inrune = append(inrune, '_') + } + inrune = append(inrune, segment...) + return inrune +} + +// UnderscoreToCamelCase converts from underscore-separated form to camel-case form. +// Ex.: my_func => MyFunc +func UnderscoreToCamelCase(s string) string { + return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1) +} + +// CamelCaseToUnderscore converts from camel-case form to underscore-separated form. +// Ex.: MyFunc => my_func +func CamelCaseToUnderscore(str string) string { + var output []rune + var segment []rune + for _, r := range str { + + // do not treat numbers as separate segments + if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) { + output = addSegment(output, segment) + segment = nil + } + segment = append(segment, unicode.ToLower(r)) + } + output = addSegment(output, segment) + return string(output) +} + +// Reverse returns the reversed string +func Reverse(s string) string { + r := []rune(s) + for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r) +} + +// GetLines splits a string by "\n" and returns an array of lines +func GetLines(s string) []string { + return strings.Split(s, "\n") +} + +// GetLine returns the specified line of a multiline string +func GetLine(s string, index int) (string, error) { + lines := GetLines(s) + if index < 0 || index >= len(lines) { + return "", errors.New("line index out of bounds") + } + return lines[index], nil +} + +// RemoveTags removes all tags from an HTML string +func RemoveTags(s string) string { + return ReplacePattern(s, "<[^>]*>", "") +} + +// SafeFileName returns a safe string that can be used in file names +func SafeFileName(str string) string { + name := strings.ToLower(str) + name = path.Clean(path.Base(name)) + name = strings.Trim(name, " ") + separators, err := regexp.Compile(`[ &_=+:]`) + if err == nil { + name = separators.ReplaceAllString(name, "-") + } + legal, err := regexp.Compile(`[^[:alnum:]-.]`) + if err == nil { + name = legal.ReplaceAllString(name, "") + } + for strings.Contains(name, "--") { + name = strings.Replace(name, "--", "-", -1) + } + return name +}
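Editor's aside: a minimal, hedged sketch of the sanitizers defined above (assuming the package is imported as govalidator); the expected results in the comments follow from the definitions just shown and are illustration only.

// Sketch only; assumes: import "github.com/asaskevich/govalidator"
func exampleSanitizers() {
	_ = govalidator.Trim("  hello  ", "")            // "hello": empty chars trims whitespace
	_ = govalidator.WhiteList("a1b2c3", "0-9")       // "123": everything outside [0-9] is removed
	_ = govalidator.SafeFileName("My File!.TXT")     // "my-file.txt": lowercased, separators become "-", illegal runes dropped
	_ = govalidator.CamelCaseToUnderscore("MyFunc2") // "my_func2": digits stay in the current segment
}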
+// NormalizeEmail canonicalizes an email address. +// The local part and the hostname are always lowercased. +// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and +// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are +// normalized to @gmail.com. +func NormalizeEmail(str string) (string, error) { + if !IsEmail(str) { + return "", fmt.Errorf("%s is not an email", str) + } + parts := strings.Split(str, "@") + parts[0] = strings.ToLower(parts[0]) + parts[1] = strings.ToLower(parts[1]) + if parts[1] == "gmail.com" || parts[1] == "googlemail.com" { + parts[1] = "gmail.com" + parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0] + } + return strings.Join(parts, "@"), nil +} + +// Truncate truncates a string to the closest length without breaking words, appending ending. +func Truncate(str string, length int, ending string) string { + var aftstr, befstr string + if len(str) > length { + words := strings.Fields(str) + before, present := 0, 0 + for i := range words { + befstr = aftstr + before = present + aftstr = aftstr + words[i] + " " + present = len(aftstr) + if present > length && i != 0 { + if (length - before) < (present - length) { + return Trim(befstr, " /\\.,\"'#!?&@+-") + ending + } + return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending + } + } + } + + return str +} + +// PadLeft pads the left side of a string if its size is less than the indicated pad length +func PadLeft(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, true, false) +} + +// PadRight pads the right side of a string if its size is less than the indicated pad length +func PadRight(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, false, true) +} + +// PadBoth pads both sides of a string if its size is less than the indicated pad length +func PadBoth(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, true, true) +} + +// buildPadStr pads a string on the left, on the right, or on both sides. +// Note that the padding string can be unicode and more than one character +func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string { + + // When the padded length is less than the current string size + if padLen < utf8.RuneCountInString(str) { + return str + } + + padLen -= utf8.RuneCountInString(str) + + targetLen := padLen + + targetLenLeft := targetLen + targetLenRight := targetLen + if padLeft && padRight { + targetLenLeft = padLen / 2 + targetLenRight = padLen - targetLenLeft + } + + strToRepeatLen := utf8.RuneCountInString(padStr) + + repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen))) + repeatedString := strings.Repeat(padStr, repeatTimes) + + leftSide := "" + if padLeft { + leftSide = repeatedString[0:targetLenLeft] + } + + rightSide := "" + if padRight { + rightSide = repeatedString[0:targetLenRight] + } + + return leftSide + str + rightSide +} + +// TruncatingErrorf works like fmt.Errorf but drops any extra args for which str has no %s verb +func TruncatingErrorf(str string, args ...interface{}) error { + n := strings.Count(str, "%s") + return fmt.Errorf(str, args[:n]...)
+} diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go new file mode 100644 index 00000000000..c9c4fac0655 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/validator.go @@ -0,0 +1,1768 @@ +// Package govalidator is a package of validators and sanitizers for strings, structs and collections. +package govalidator + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "net" + "net/url" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +var ( + fieldsRequiredByDefault bool + nilPtrAllowedByRequired = false + notNumberRegexp = regexp.MustCompile("[^0-9]+") + whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`) + paramsRegexp = regexp.MustCompile(`\(.*\)$`) +) + +const maxURLRuneCount = 2083 +const minURLRuneCount = 3 +const rfc3339WithoutZone = "2006-01-02T15:04:05" + +// SetFieldsRequiredByDefault causes validation to fail when struct fields +// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). +// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): +// type exampleStruct struct { +// Name string `` +// Email string `valid:"email"` +// } +// This, however, will only fail when Email is empty or an invalid email address: +// type exampleStruct2 struct { +// Name string `valid:"-"` +// Email string `valid:"email"` +// } +// Lastly, this will only fail when Email is an invalid email address but not when it's empty: +// type exampleStruct3 struct { +// Name string `valid:"-"` +// Email string `valid:"email,optional"` +// } +func SetFieldsRequiredByDefault(value bool) { + fieldsRequiredByDefault = value +} + +// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required. +// The validation will still reject ptr fields in their zero value state. Example with this enabled: +// type exampleStruct struct { +// Name *string `valid:"required"` +// } +// With `Name` set to "", this will be considered invalid input and will cause a validation error. +// With `Name` set to nil, this will be considered valid by validation. +// By default this is disabled. +func SetNilPtrAllowedByRequired(value bool) { + nilPtrAllowedByRequired = value +} + +// IsEmail checks if the string is an email. +func IsEmail(str string) bool { + // TODO uppercase letters are not supported + return rxEmail.MatchString(str) +} + +// IsExistingEmail checks if the string is an email address on an existing domain +func IsExistingEmail(email string) bool { + + if len(email) < 6 || len(email) > 254 { + return false + } + at := strings.LastIndex(email, "@") + if at <= 0 || at > len(email)-3 { + return false + } + user := email[:at] + host := email[at+1:] + if len(user) > 64 { + return false + } + switch host { + case "localhost", "example.com": + return true + } + if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) { + return false + } + if _, err := net.LookupMX(host); err != nil { + if _, err := net.LookupIP(host); err != nil { + return false + } + } + + return true +}
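Editor's aside: the struct-tag machinery these setters configure is driven through govalidator.ValidateStruct, the entry point named in the SetFieldsRequiredByDefault comment above. A hedged sketch; the exampleUser type is illustrative only.

// Sketch only; assumes: import "github.com/asaskevich/govalidator"
type exampleUser struct {
	Name  string `valid:"-"`     // exempt from validation
	Email string `valid:"email"` // must satisfy IsEmail
}

func exampleValidateStruct() {
	ok, err := govalidator.ValidateStruct(exampleUser{Name: "x", Email: "not-an-email"})
	_, _ = ok, err // ok is false; err names the failing Email field
}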
+// IsURL checks if the string is a URL. +func IsURL(str string) bool { + if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") { + return false + } + strTemp := str + if strings.Contains(str, ":") && !strings.Contains(str, "://") { + // support URLs with no scheme indicated but with a colon for the port number + // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString + strTemp = "http://" + str + } + u, err := url.Parse(strTemp) + if err != nil { + return false + } + if strings.HasPrefix(u.Host, ".") { + return false + } + if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) { + return false + } + return rxURL.MatchString(str) +} + +// IsRequestURL checks if the string rawurl, assuming +// it was received in an HTTP request, is a valid +// URL conforming to RFC 3986 +func IsRequestURL(rawurl string) bool { + url, err := url.ParseRequestURI(rawurl) + if err != nil { + return false // couldn't even parse the rawurl + } + if len(url.Scheme) == 0 { + return false // no scheme found + } + return true +} + +// IsRequestURI checks if the string rawurl, assuming +// it was received in an HTTP request, is an +// absolute URI or an absolute path. +func IsRequestURI(rawurl string) bool { + _, err := url.ParseRequestURI(rawurl) + return err == nil +} + +// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid. +func IsAlpha(str string) bool { + if IsNull(str) { + return true + } + return rxAlpha.MatchString(str) +} + +// IsUTFLetter checks if the string contains only unicode letter characters. +// Similar to IsAlpha but for all languages. Empty string is valid. +func IsUTFLetter(str string) bool { + if IsNull(str) { + return true + } + + for _, c := range str { + if !unicode.IsLetter(c) { + return false + } + } + return true + +} + +// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid. +func IsAlphanumeric(str string) bool { + if IsNull(str) { + return true + } + return rxAlphanumeric.MatchString(str) +} + +// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid. +func IsUTFLetterNumeric(str string) bool { + if IsNull(str) { + return true + } + for _, c := range str { + if !unicode.IsLetter(c) && !unicode.IsNumber(c) { // letters && numbers are ok + return false + } + } + return true + +} + +// IsNumeric checks if the string contains only numbers. Empty string is valid. +func IsNumeric(str string) bool { + if IsNull(str) { + return true + } + return rxNumeric.MatchString(str) +} + +// IsUTFNumeric checks if the string contains only unicode numbers of any kind. +// Numbers can be 0-9 but also fractions ¾, Roman Ⅸ and Hangzhou 〩. Empty string is valid. +func IsUTFNumeric(str string) bool { + if IsNull(str) { + return true + } + if strings.IndexAny(str, "+-") > 0 { + return false + } + if len(str) > 1 { + str = strings.TrimPrefix(str, "-") + str = strings.TrimPrefix(str, "+") + } + for _, c := range str { + if !unicode.IsNumber(c) { // numbers && minus sign are ok + return false + } + } + return true + +}
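Editor's aside: the three URL checks above differ mainly in strictness; a hedged sketch of the expected outcomes, based on the parsing rules described in their comments.

// Sketch only; assumes: import "github.com/asaskevich/govalidator"
func exampleURLChecks() {
	_ = govalidator.IsURL("http://example.com")      // true
	_ = govalidator.IsRequestURL("example.com")      // false: url.ParseRequestURI requires a scheme
	_ = govalidator.IsRequestURI("/static/logo.png") // true: an absolute path is accepted
}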
+// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid. +func IsUTFDigit(str string) bool { + if IsNull(str) { + return true + } + if strings.IndexAny(str, "+-") > 0 { + return false + } + if len(str) > 1 { + str = strings.TrimPrefix(str, "-") + str = strings.TrimPrefix(str, "+") + } + for _, c := range str { + if !unicode.IsDigit(c) { // digits && minus sign are ok + return false + } + } + return true + +} + +// IsHexadecimal checks if the string is a hexadecimal number. +func IsHexadecimal(str string) bool { + return rxHexadecimal.MatchString(str) +} + +// IsHexcolor checks if the string is a hexadecimal color. +func IsHexcolor(str string) bool { + return rxHexcolor.MatchString(str) +} + +// IsRGBcolor checks if the string is a valid RGB color in form rgb(RRR, GGG, BBB). +func IsRGBcolor(str string) bool { + return rxRGBcolor.MatchString(str) +} + +// IsLowerCase checks if the string is lowercase. Empty string is valid. +func IsLowerCase(str string) bool { + if IsNull(str) { + return true + } + return str == strings.ToLower(str) +} + +// IsUpperCase checks if the string is uppercase. Empty string is valid. +func IsUpperCase(str string) bool { + if IsNull(str) { + return true + } + return str == strings.ToUpper(str) +} + +// HasLowerCase checks if the string contains at least 1 lowercase. Empty string is valid. +func HasLowerCase(str string) bool { + if IsNull(str) { + return true + } + return rxHasLowerCase.MatchString(str) +} + +// HasUpperCase checks if the string contains at least 1 uppercase. Empty string is valid. +func HasUpperCase(str string) bool { + if IsNull(str) { + return true + } + return rxHasUpperCase.MatchString(str) +} + +// IsInt checks if the string is an integer. Empty string is valid. +func IsInt(str string) bool { + if IsNull(str) { + return true + } + return rxInt.MatchString(str) +} + +// IsFloat checks if the string is a float. +func IsFloat(str string) bool { + return str != "" && rxFloat.MatchString(str) +} + +// IsDivisibleBy checks if the string is a number that's divisible by another. +// If the second argument is not a valid integer or is zero, it returns false. +// Otherwise, if the first argument is not a valid integer, it returns true (an invalid string converts to zero). +func IsDivisibleBy(str, num string) bool { + f, _ := ToFloat(str) + p := int64(f) + q, _ := ToInt(num) + if q == 0 { + return false + } + return (p == 0) || (p%q == 0) +} + +// IsNull checks if the string is null. +func IsNull(str string) bool { + return len(str) == 0 +} + +// IsNotNull checks if the string is not null. +func IsNotNull(str string) bool { + return !IsNull(str) +} + +// HasWhitespaceOnly checks if the string only contains whitespace +func HasWhitespaceOnly(str string) bool { + return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str) +} + +// HasWhitespace checks if the string contains any whitespace +func HasWhitespace(str string) bool { + return len(str) > 0 && rxHasWhitespace.MatchString(str) +} + +// IsByteLength checks if the string's length (in bytes) falls in a range. +func IsByteLength(str string, min, max int) bool { + return len(str) >= min && len(str) <= max +} + +// IsUUIDv3 checks if the string is a UUID version 3. +func IsUUIDv3(str string) bool { + return rxUUID3.MatchString(str) +} + +// IsUUIDv4 checks if the string is a UUID version 4. +func IsUUIDv4(str string) bool { + return rxUUID4.MatchString(str) +} + +// IsUUIDv5 checks if the string is a UUID version 5. +func IsUUIDv5(str string) bool { + return rxUUID5.MatchString(str) +}
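Editor's aside: the IsDivisibleBy semantics above are easy to misread, so a worked sketch: an unparsable first argument converts to zero, which counts as divisible by anything, while a zero (or unparsable) divisor always fails.

// Sketch only; assumes: import "github.com/asaskevich/govalidator"
func exampleDivisibleBy() {
	_ = govalidator.IsDivisibleBy("10", "5")  // true: 10 % 5 == 0
	_ = govalidator.IsDivisibleBy("abc", "5") // true: "abc" converts to 0
	_ = govalidator.IsDivisibleBy("10", "0")  // false: zero divisor
}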
+// IsUUID checks if the string is a UUID (version 3, 4 or 5). +func IsUUID(str string) bool { + return rxUUID.MatchString(str) +} + +// Byte to index table for O(1) lookups when unmarshaling. +// We use 0xFF as sentinel value for invalid indexes. +var ulidDec = [...]byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, + 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, + 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, + 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, + 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, + 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, + 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, +} + +// ulidEncodedSize is the length of a text-encoded ULID. +const ulidEncodedSize = 26 + +// IsULID checks if the string is a ULID. +// +// Implementation taken from: +// https://github.com/oklog/ulid (Apache-2.0 License) +// +func IsULID(str string) bool { + // Check if a base32 encoded ULID is the right length. + if len(str) != ulidEncodedSize { + return false + } + + // Check if all the characters in a base32 encoded ULID are part of the + // expected base32 character set. + if ulidDec[str[0]] == 0xFF || + ulidDec[str[1]] == 0xFF || + ulidDec[str[2]] == 0xFF || + ulidDec[str[3]] == 0xFF || + ulidDec[str[4]] == 0xFF || + ulidDec[str[5]] == 0xFF || + ulidDec[str[6]] == 0xFF || + ulidDec[str[7]] == 0xFF || + ulidDec[str[8]] == 0xFF || + ulidDec[str[9]] == 0xFF || + ulidDec[str[10]] == 0xFF || + ulidDec[str[11]] == 0xFF || + ulidDec[str[12]] == 0xFF || + ulidDec[str[13]] == 0xFF || + ulidDec[str[14]] == 0xFF || + ulidDec[str[15]] == 0xFF || + ulidDec[str[16]] == 0xFF || + ulidDec[str[17]] == 0xFF || + ulidDec[str[18]] == 0xFF || + ulidDec[str[19]] == 0xFF || + ulidDec[str[20]] == 0xFF || + ulidDec[str[21]] == 0xFF || + ulidDec[str[22]] == 0xFF || + ulidDec[str[23]] == 0xFF || + ulidDec[str[24]] == 0xFF || + ulidDec[str[25]] == 0xFF { + return false + } + + // Check if the first character in a base32 encoded ULID will overflow. This + // happens because the base32 representation encodes 130 bits, while the + // ULID is only 128 bits. + // + // See https://github.com/oklog/ulid/issues/9 for details. + if str[0] > '7' { + return false + } + return true +}
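Editor's aside: a hedged sketch of IsULID against the rules above (26 Crockford base32 characters, and a first character no greater than '7' so the value fits in 128 bits).

// Sketch only; assumes: import "github.com/asaskevich/govalidator"
func exampleULID() {
	_ = govalidator.IsULID("01G65Z755AFWAKHE12NY0CQ9FH") // true: 26 valid base32 characters
	_ = govalidator.IsULID("01G65Z755AFWAKHE12NY0CQ9FI") // false: 'I' is outside the alphabet
	_ = govalidator.IsULID("8ZZZZZZZZZZZZZZZZZZZZZZZZZ") // false: leading '8' would overflow 128 bits
}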
+// IsCreditCard checks if the string is a credit card number. +func IsCreditCard(str string) bool { + sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") + if !rxCreditCard.MatchString(sanitized) { + return false + } + + number, _ := ToInt(sanitized) + number, lastDigit := number/10, number%10 + + var sum int64 + for i := 0; number > 0; i++ { + digit := number % 10 + + if i%2 == 0 { + digit *= 2 + if digit > 9 { + digit -= 9 + } + } + + sum += digit + number = number / 10 + } + + return (sum+lastDigit)%10 == 0 +} + +// IsISBN10 checks if the string is an ISBN version 10. +func IsISBN10(str string) bool { + return IsISBN(str, 10) +} + +// IsISBN13 checks if the string is an ISBN version 13. +func IsISBN13(str string) bool { + return IsISBN(str, 13) +} + +// IsISBN checks if the string is an ISBN (version 10 or 13). +// If the version value is not equal to 10 or 13, it checks both variants. +func IsISBN(str string, version int) bool { + sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") + var checksum int32 + var i int32 + if version == 10 { + if !rxISBN10.MatchString(sanitized) { + return false + } + for i = 0; i < 9; i++ { + checksum += (i + 1) * int32(sanitized[i]-'0') + } + if sanitized[9] == 'X' { + checksum += 10 * 10 + } else { + checksum += 10 * int32(sanitized[9]-'0') + } + if checksum%11 == 0 { + return true + } + return false + } else if version == 13 { + if !rxISBN13.MatchString(sanitized) { + return false + } + factor := []int32{1, 3} + for i = 0; i < 12; i++ { + checksum += factor[i%2] * int32(sanitized[i]-'0') + } + return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0 + } + return IsISBN(str, 10) || IsISBN(str, 13) +} + +// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal). +func IsJSON(str string) bool { + var js json.RawMessage + return json.Unmarshal([]byte(str), &js) == nil +} + +// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid. +func IsMultibyte(str string) bool { + if IsNull(str) { + return true + } + return rxMultibyte.MatchString(str) +} + +// IsASCII checks if the string contains ASCII chars only. Empty string is valid. +func IsASCII(str string) bool { + if IsNull(str) { + return true + } + return rxASCII.MatchString(str) +} + +// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid. +func IsPrintableASCII(str string) bool { + if IsNull(str) { + return true + } + return rxPrintableASCII.MatchString(str) +} + +// IsFullWidth checks if the string contains any full-width chars. Empty string is valid. +func IsFullWidth(str string) bool { + if IsNull(str) { + return true + } + return rxFullWidth.MatchString(str) +} + +// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid. +func IsHalfWidth(str string) bool { + if IsNull(str) { + return true + } + return rxHalfWidth.MatchString(str) +} + +// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid. +func IsVariableWidth(str string) bool { + if IsNull(str) { + return true + } + return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str) +} + +// IsBase64 checks if a string is base64 encoded. +func IsBase64(str string) bool { + return rxBase64.MatchString(str) +}
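Editor's aside: the Luhn loop in IsCreditCard above strips the check digit, doubles every second digit moving left, and folds anything over 9; a worked sketch with the classic test number 4111 1111 1111 1111, whose weighted digits sum to 30.

// Sketch only; assumes: import "github.com/asaskevich/govalidator"
func exampleLuhn() {
	_ = govalidator.IsCreditCard("4111 1111 1111 1111") // true: spaces are stripped, (sum+checkDigit)%10 == 0
	_ = govalidator.IsCreditCard("4111111111111112")    // false: checksum fails
}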
+// IsFilePath checks if a string is a Windows or Unix file path and returns its type. +func IsFilePath(str string) (bool, int) { + if rxWinPath.MatchString(str) { + // check the windows path length limit, see: + // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath + if len(str[3:]) > 32767 { + return false, Win + } + return true, Win + } else if rxUnixPath.MatchString(str) { + return true, Unix + } + return false, Unknown +} + +// IsWinFilePath checks both relative & absolute paths in Windows +func IsWinFilePath(str string) bool { + if rxARWinPath.MatchString(str) { + // check the windows path length limit, see: + // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath + if len(str[3:]) > 32767 { + return false + } + return true + } + return false +} + +// IsUnixFilePath checks both relative & absolute paths in Unix +func IsUnixFilePath(str string) bool { + if rxARUnixPath.MatchString(str) { + return true + } + return false +} + +// IsDataURI checks if a string is a base64-encoded data URI such as an image +func IsDataURI(str string) bool { + dataURI := strings.Split(str, ",") + if !rxDataURI.MatchString(dataURI[0]) { + return false + } + return IsBase64(dataURI[1]) +} + +// IsMagnetURI checks if a string is a valid magnet URI +func IsMagnetURI(str string) bool { + return rxMagnetURI.MatchString(str) +} + +// IsISO3166Alpha2 checks if a string is a valid two-letter country code +func IsISO3166Alpha2(str string) bool { + for _, entry := range ISO3166List { + if str == entry.Alpha2Code { + return true + } + } + return false +} + +// IsISO3166Alpha3 checks if a string is a valid three-letter country code +func IsISO3166Alpha3(str string) bool { + for _, entry := range ISO3166List { + if str == entry.Alpha3Code { + return true + } + } + return false +} + +// IsISO693Alpha2 checks if a string is a valid two-letter language code +func IsISO693Alpha2(str string) bool { + for _, entry := range ISO693List { + if str == entry.Alpha2Code { + return true + } + } + return false +} + +// IsISO693Alpha3b checks if a string is a valid three-letter language code +func IsISO693Alpha3b(str string) bool { + for _, entry := range ISO693List { + if str == entry.Alpha3bCode { + return true + } + } + return false +} + +// IsDNSName will validate the given string as a DNS name +func IsDNSName(str string) bool { + if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 { + // constraints already violated + return false + } + return !IsIP(str) && rxDNSName.MatchString(str) +} + +// IsHash checks if a string is a hash of type algorithm. +// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b'] +func IsHash(str string, algorithm string) bool { + var len string + algo := strings.ToLower(algorithm) + + if algo == "crc32" || algo == "crc32b" { + len = "8" + } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" { + len = "32" + } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" { + len = "40" + } else if algo == "tiger192" { + len = "48" + } else if algo == "sha3-224" { + len = "56" + } else if algo == "sha256" || algo == "sha3-256" { + len = "64" + } else if algo == "sha384" || algo == "sha3-384" { + len = "96" + } else if algo == "sha512" || algo == "sha3-512" { + len = "128" + } else { + return false + } + + return Matches(str, "^[a-f0-9]{"+len+"}$") +}
+// IsSHA3224 checks if a string is a SHA3-224 hash. Alias for `IsHash(str, "sha3-224")` +func IsSHA3224(str string) bool { + return IsHash(str, "sha3-224") +} + +// IsSHA3256 checks if a string is a SHA3-256 hash. Alias for `IsHash(str, "sha3-256")` +func IsSHA3256(str string) bool { + return IsHash(str, "sha3-256") +} + +// IsSHA3384 checks if a string is a SHA3-384 hash. Alias for `IsHash(str, "sha3-384")` +func IsSHA3384(str string) bool { + return IsHash(str, "sha3-384") +} + +// IsSHA3512 checks if a string is a SHA3-512 hash. Alias for `IsHash(str, "sha3-512")` +func IsSHA3512(str string) bool { + return IsHash(str, "sha3-512") +} + +// IsSHA512 checks if a string is a SHA512 hash. Alias for `IsHash(str, "sha512")` +func IsSHA512(str string) bool { + return IsHash(str, "sha512") +} + +// IsSHA384 checks if a string is a SHA384 hash. Alias for `IsHash(str, "sha384")` +func IsSHA384(str string) bool { + return IsHash(str, "sha384") +} + +// IsSHA256 checks if a string is a SHA256 hash. Alias for `IsHash(str, "sha256")` +func IsSHA256(str string) bool { + return IsHash(str, "sha256") +} + +// IsTiger192 checks if a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")` +func IsTiger192(str string) bool { + return IsHash(str, "tiger192") +} + +// IsTiger160 checks if a string is a Tiger160 hash. Alias for `IsHash(str, "tiger160")` +func IsTiger160(str string) bool { + return IsHash(str, "tiger160") +} + +// IsRipeMD160 checks if a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")` +func IsRipeMD160(str string) bool { + return IsHash(str, "ripemd160") +} + +// IsSHA1 checks if a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")` +func IsSHA1(str string) bool { + return IsHash(str, "sha1") +} + +// IsTiger128 checks if a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")` +func IsTiger128(str string) bool { + return IsHash(str, "tiger128") +} + +// IsRipeMD128 checks if a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")` +func IsRipeMD128(str string) bool { + return IsHash(str, "ripemd128") +} + +// IsCRC32 checks if a string is a CRC32 hash. Alias for `IsHash(str, "crc32")` +func IsCRC32(str string) bool { + return IsHash(str, "crc32") +} + +// IsCRC32b checks if a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")` +func IsCRC32b(str string) bool { + return IsHash(str, "crc32b") +} + +// IsMD5 checks if a string is an MD5 hash. Alias for `IsHash(str, "md5")` +func IsMD5(str string) bool { + return IsHash(str, "md5") +} + +// IsMD4 checks if a string is an MD4 hash. Alias for `IsHash(str, "md4")` +func IsMD4(str string) bool { + return IsHash(str, "md4") +} + +// IsDialString validates the given string for usage with the various Dial() functions +func IsDialString(str string) bool { + if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) { + return true + } + + return false +} + +// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP` +func IsIP(str string) bool { + return net.ParseIP(str) != nil +} + +// IsPort checks if a string represents a valid port +func IsPort(str string) bool { + if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 { + return true + } + return false +} + +// IsIPv4 checks if the string is an IP version 4. +func IsIPv4(str string) bool { + ip := net.ParseIP(str) + return ip != nil && strings.Contains(str, ".") +}
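Editor's aside: a hedged sketch tying the network checks above together; note that IsDialString composes IsDNSName/IsIP with IsPort via net.SplitHostPort.

// Sketch only; assumes: import "github.com/asaskevich/govalidator"
func exampleNetworkChecks() {
	_ = govalidator.IsIPv4("192.168.0.1")           // true
	_ = govalidator.IsPort("65535")                 // true; "0" and "65536" are false
	_ = govalidator.IsDialString("example.com:443") // true: DNS name plus a valid port
	_ = govalidator.IsDialString("example.com")     // false: no port to split off
}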
+func IsIPv6(str string) bool {
+	ip := net.ParseIP(str)
+	return ip != nil && strings.Contains(str, ":")
+}
+
+// IsCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6)
+func IsCIDR(str string) bool {
+	_, _, err := net.ParseCIDR(str)
+	return err == nil
+}
+
+// IsMAC checks if a string is a valid MAC address.
+// Possible MAC formats:
+// 01:23:45:67:89:ab
+// 01:23:45:67:89:ab:cd:ef
+// 01-23-45-67-89-ab
+// 01-23-45-67-89-ab-cd-ef
+// 0123.4567.89ab
+// 0123.4567.89ab.cdef
+func IsMAC(str string) bool {
+	_, err := net.ParseMAC(str)
+	return err == nil
+}
+
+// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name
+func IsHost(str string) bool {
+	return IsIP(str) || IsDNSName(str)
+}
+
+// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
+func IsMongoID(str string) bool {
+	return rxHexadecimal.MatchString(str) && (len(str) == 24)
+}
+
+// IsLatitude checks if a string is a valid latitude.
+func IsLatitude(str string) bool {
+	return rxLatitude.MatchString(str)
+}
+
+// IsLongitude checks if a string is a valid longitude.
+func IsLongitude(str string) bool {
+	return rxLongitude.MatchString(str)
+}
+
+// IsIMEI checks if a string is a valid IMEI
+func IsIMEI(str string) bool {
+	return rxIMEI.MatchString(str)
+}
+
+// IsIMSI checks if a string is a valid IMSI
+func IsIMSI(str string) bool {
+	if !rxIMSI.MatchString(str) {
+		return false
+	}
+
+	mcc, err := strconv.ParseInt(str[0:3], 10, 32)
+	if err != nil {
+		return false
+	}
+
+	// empty cases fall out of the switch to the trailing `return true`
+	switch mcc {
+	case 202, 204, 206, 208, 212, 213, 214, 216, 218, 219:
+	case 220, 221, 222, 226, 228, 230, 231, 232, 234, 235:
+	case 238, 240, 242, 244, 246, 247, 248, 250, 255, 257:
+	case 259, 260, 262, 266, 268, 270, 272, 274, 276, 278:
+	case 280, 282, 283, 284, 286, 288, 289, 290, 292, 293:
+	case 294, 295, 297, 302, 308, 310, 311, 312, 313, 314:
+	case 315, 316, 330, 332, 334, 338, 340, 342, 344, 346:
+	case 348, 350, 352, 354, 356, 358, 360, 362, 363, 364:
+	case 365, 366, 368, 370, 372, 374, 376, 400, 401, 402:
+	case 404, 405, 406, 410, 412, 413, 414, 415, 416, 417:
+	case 418, 419, 420, 421, 422, 424, 425, 426, 427, 428:
+	case 429, 430, 431, 432, 434, 436, 437, 438, 440, 441:
+	case 450, 452, 454, 455, 456, 457, 460, 461, 466, 467:
+	case 470, 472, 502, 505, 510, 514, 515, 520, 525, 528:
+	case 530, 536, 537, 539, 540, 541, 542, 543, 544, 545:
+	case 546, 547, 548, 549, 550, 551, 552, 553, 554, 555:
+	case 602, 603, 604, 605, 606, 607, 608, 609, 610, 611:
+	case 612, 613, 614, 615, 616, 617, 618, 619, 620, 621:
+	case 622, 623, 624, 625, 626, 627, 628, 629, 630, 631:
+	case 632, 633, 634, 635, 636, 637, 638, 639, 640, 641:
+	case 642, 643, 645, 646, 647, 648, 649, 650, 651, 652:
+	case 653, 654, 655, 657, 658, 659, 702, 704, 706, 708:
+	case 710, 712, 714, 716, 722, 724, 730, 732, 734, 736:
+	case 738, 740, 742, 744, 746, 748, 750, 995:
+		return true
+	default:
+		return false
+	}
+	return true
+}
+
+// IsRsaPublicKey checks if a string is a valid RSA public key with the provided length
+func IsRsaPublicKey(str string, keylen int) bool {
+	bb := bytes.NewBufferString(str)
+	pemBytes, err := ioutil.ReadAll(bb)
+	if err != nil {
+		return false
+	}
+	block, _ := pem.Decode(pemBytes)
+	if block != nil && block.Type != "PUBLIC KEY" {
+		return false
+	}
+	var der []byte
+
+	if block != nil {
+		der = block.Bytes
+	} else {
+		der, err = base64.StdEncoding.DecodeString(str)
+		if err != nil {
+			return false
+		}
+	}
+
+	key, err := x509.ParsePKIXPublicKey(der)
+	if err != nil {
+		return false
+	}
+	pubkey, ok := key.(*rsa.PublicKey)
+	if !ok {
+		return false
+	}
+	bitlen := len(pubkey.N.Bytes()) * 8
+	return bitlen == keylen
+}
+
+// IsRegex checks if a given string is a valid regex with RE2 syntax or not
+func IsRegex(str string) bool {
+	_, err := regexp.Compile(str)
+	return err == nil
+}
+
+func toJSONName(tag string) string {
+	if tag == "" {
+		return ""
+	}
+
+	// JSON name always comes first. If there are no options then split[0] is
+	// the JSON name; if the JSON name is not set, then split[0] is an empty string.
+	split := strings.SplitN(tag, ",", 2)
+
+	name := split[0]
+
+	// However it is possible that the field is skipped when
+	// (de-)serializing from/to JSON, in which case assume that there is no
+	// tag name to use
+	if name == "-" {
+		return ""
+	}
+	return name
+}
+
+func prependPathToErrors(err error, path string) error {
+	switch err2 := err.(type) {
+	case Error:
+		err2.Path = append([]string{path}, err2.Path...)
+		return err2
+	case Errors:
+		errors := err2.Errors()
+		for i, err3 := range errors {
+			errors[i] = prependPathToErrors(err3, path)
+		}
+		return err2
+	}
+	return err
+}
+
+// ValidateArray performs validation according to condition iterator that validates every element of the array
+func ValidateArray(array []interface{}, iterator ConditionIterator) bool {
+	return Every(array, iterator)
+}
+
+// ValidateMap uses a validation map for fields.
+// result will be equal to `false` if there are any errors.
+// s is the map containing the data to be validated.
+// m is the validation map in the form:
+// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}}
+func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) {
+	if s == nil {
+		return true, nil
+	}
+	result := true
+	var err error
+	var errs Errors
+	var index int
+	val := reflect.ValueOf(s)
+	for key, value := range s {
+		presentResult := true
+		validator, ok := m[key]
+		if !ok {
+			presentResult = false
+			var err error
+			err = fmt.Errorf("all map keys have to be present in the validation map; got %s", key)
+			err = prependPathToErrors(err, key)
+			errs = append(errs, err)
+		}
+		valueField := reflect.ValueOf(value)
+		mapResult := true
+		typeResult := true
+		structResult := true
+		resultField := true
+		switch subValidator := validator.(type) {
+		case map[string]interface{}:
+			var err error
+			if v, ok := value.(map[string]interface{}); !ok {
+				mapResult = false
+				err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String())
+				err = prependPathToErrors(err, key)
+				errs = append(errs, err)
+			} else {
+				mapResult, err = ValidateMap(v, subValidator)
+				if err != nil {
+					mapResult = false
+					err = prependPathToErrors(err, key)
+					errs = append(errs, err)
+				}
+			}
+		case string:
+			if (valueField.Kind() == reflect.Struct ||
+				(valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
+				subValidator != "-" {
+				var err error
+				structResult, err = ValidateStruct(valueField.Interface())
+				if err != nil {
+					err = prependPathToErrors(err, key)
+					errs = append(errs, err)
+				}
+			}
+			resultField, err = typeCheck(valueField, reflect.StructField{
+				Name:      key,
+				PkgPath:   "",
+				Type:      val.Type(),
+				Tag:       reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)),
+				Offset:    0,
+				Index:     []int{index},
+				Anonymous: false,
+			}, val, nil)
+			if err != nil {
+				errs = append(errs, err)
+			}
+		case nil:
+			// already handled when checked before
+		default:
+			typeResult = false
+			err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String())
+			err = prependPathToErrors(err, key)
+			errs = append(errs, err)
+		}
+		result = result && presentResult && typeResult && resultField && structResult && mapResult
+		index++
+	}
+	// checks required keys
+	requiredResult := true
+	for key, value := range m {
+		if schema, ok := value.(string); ok {
+			tags := parseTagIntoMap(schema)
+			if required, ok := tags["required"]; ok {
+				if _, ok := s[key]; !ok {
+					requiredResult = false
+					if required.customErrorMessage != "" {
+						err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}}
+					} else {
+						err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}}
+					}
+					errs = append(errs, err)
+				}
+			}
+		}
+	}
+
+	if len(errs) > 0 {
+		err = errs
+	}
+	return result && requiredResult, err
+}
+
+// ValidateStruct uses tags for fields.
+// result will be equal to `false` if there are any errors.
+// TODO: currently there is no guarantee that errors will be returned in a predictable order (tests may fail)
+func ValidateStruct(s interface{}) (bool, error) {
+	if s == nil {
+		return true, nil
+	}
+	result := true
+	var err error
+	val := reflect.ValueOf(s)
+	if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	// we only accept structs
+	if val.Kind() != reflect.Struct {
+		return false, fmt.Errorf("function only accepts structs; got %s", val.Kind())
+	}
+	var errs Errors
+	for i := 0; i < val.NumField(); i++ {
+		valueField := val.Field(i)
+		typeField := val.Type().Field(i)
+		if typeField.PkgPath != "" {
+			continue // Private field
+		}
+		structResult := true
+		if valueField.Kind() == reflect.Interface {
+			valueField = valueField.Elem()
+		}
+		if (valueField.Kind() == reflect.Struct ||
+			(valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
+			typeField.Tag.Get(tagName) != "-" {
+			var err error
+			structResult, err = ValidateStruct(valueField.Interface())
+			if err != nil {
+				err = prependPathToErrors(err, typeField.Name)
+				errs = append(errs, err)
+			}
+		}
+		resultField, err2 := typeCheck(valueField, typeField, val, nil)
+		if err2 != nil {
+
+			// Replace structure name with JSON name if there is a tag on the variable
+			jsonTag := toJSONName(typeField.Tag.Get("json"))
+			if jsonTag != "" {
+				switch jsonError := err2.(type) {
+				case Error:
+					jsonError.Name = jsonTag
+					err2 = jsonError
+				case Errors:
+					for i2, err3 := range jsonError {
+						switch customErr := err3.(type) {
+						case Error:
+							customErr.Name = jsonTag
+							jsonError[i2] = customErr
+						}
+					}
+
+					err2 = jsonError
+				}
+			}
+
+			errs = append(errs, err2)
+		}
+		result = result && resultField && structResult
+	}
+	if len(errs) > 0 {
+		err = errs
+	}
+	return result, err
+}
+
+// ValidateStructAsync performs async validation of the struct and returns results through the channels
+func ValidateStructAsync(s interface{}) (<-chan bool, <-chan error) {
+	res := make(chan bool)
+	errors := make(chan error)
+
+	go func() {
+		defer close(res)
+		defer close(errors)
+
+		isValid, isFailed := ValidateStruct(s)
+
+		res <- isValid
+		errors <- isFailed
+	}()
+
+	return res, errors
+}
+
+// ValidateMapAsync performs async validation of the map and returns results through the channels
+func ValidateMapAsync(s map[string]interface{}, m map[string]interface{}) (<-chan bool, <-chan error) {
+	res := make(chan bool)
+	errors := make(chan error)
+
+	go func() {
+		defer close(res)
+		defer close(errors)
+ + isValid, isFailed := ValidateMap(s, m) + + res <- isValid + errors <- isFailed + }() + + return res, errors +} + +// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""} +func parseTagIntoMap(tag string) tagOptionsMap { + optionsMap := make(tagOptionsMap) + options := strings.Split(tag, ",") + + for i, option := range options { + option = strings.TrimSpace(option) + + validationOptions := strings.Split(option, "~") + if !isValidTag(validationOptions[0]) { + continue + } + if len(validationOptions) == 2 { + optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i} + } else { + optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i} + } + } + return optionsMap +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +// IsSSN will validate the given string as a U.S. Social Security Number +func IsSSN(str string) bool { + if str == "" || len(str) != 11 { + return false + } + return rxSSN.MatchString(str) +} + +// IsSemver checks if string is valid semantic version +func IsSemver(str string) bool { + return rxSemver.MatchString(str) +} + +// IsType checks if interface is of some type +func IsType(v interface{}, params ...string) bool { + if len(params) == 1 { + typ := params[0] + return strings.Replace(reflect.TypeOf(v).String(), " ", "", -1) == strings.Replace(typ, " ", "", -1) + } + return false +} + +// IsTime checks if string is valid according to given format +func IsTime(str string, format string) bool { + _, err := time.Parse(format, str) + return err == nil +} + +// IsUnixTime checks if string is valid unix timestamp value +func IsUnixTime(str string) bool { + if _, err := strconv.Atoi(str); err == nil { + return true + } + return false +} + +// IsRFC3339 checks if string is valid timestamp value according to RFC3339 +func IsRFC3339(str string) bool { + return IsTime(str, time.RFC3339) +} + +// IsRFC3339WithoutZone checks if string is valid timestamp value according to RFC3339 which excludes the timezone. +func IsRFC3339WithoutZone(str string) bool { + return IsTime(str, rfc3339WithoutZone) +} + +// IsISO4217 checks if string is valid ISO currency code +func IsISO4217(str string) bool { + for _, currency := range ISO4217List { + if str == currency { + return true + } + } + + return false +} + +// ByteLength checks string's length +func ByteLength(str string, params ...string) bool { + if len(params) == 2 { + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return len(str) >= int(min) && len(str) <= int(max) + } + + return false +} + +// RuneLength checks string's length +// Alias for StringLength +func RuneLength(str string, params ...string) bool { + return StringLength(str, params...) +} + +// IsRsaPub checks whether string is valid RSA key +// Alias for IsRsaPublicKey +func IsRsaPub(str string, params ...string) bool { + if len(params) == 1 { + len, _ := ToInt(params[0]) + return IsRsaPublicKey(str, int(len)) + } + + return false +} + +// StringMatches checks if a string matches a given pattern. 
+func StringMatches(s string, params ...string) bool { + if len(params) == 1 { + pattern := params[0] + return Matches(s, pattern) + } + return false +} + +// StringLength checks string's length (including multi byte strings) +func StringLength(str string, params ...string) bool { + + if len(params) == 2 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return strLength >= int(min) && strLength <= int(max) + } + + return false +} + +// MinStringLength checks string's minimum length (including multi byte strings) +func MinStringLength(str string, params ...string) bool { + + if len(params) == 1 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + return strLength >= int(min) + } + + return false +} + +// MaxStringLength checks string's maximum length (including multi byte strings) +func MaxStringLength(str string, params ...string) bool { + + if len(params) == 1 { + strLength := utf8.RuneCountInString(str) + max, _ := ToInt(params[0]) + return strLength <= int(max) + } + + return false +} + +// Range checks string's length +func Range(str string, params ...string) bool { + if len(params) == 2 { + value, _ := ToFloat(str) + min, _ := ToFloat(params[0]) + max, _ := ToFloat(params[1]) + return InRange(value, min, max) + } + + return false +} + +// IsInRaw checks if string is in list of allowed values +func IsInRaw(str string, params ...string) bool { + if len(params) == 1 { + rawParams := params[0] + + parsedParams := strings.Split(rawParams, "|") + + return IsIn(str, parsedParams...) + } + + return false +} + +// IsIn checks if string str is a member of the set of strings params +func IsIn(str string, params ...string) bool { + for _, param := range params { + if str == param { + return true + } + } + + return false +} + +func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { + if nilPtrAllowedByRequired { + k := v.Kind() + if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() { + return true, nil + } + } + + if requiredOption, isRequired := options["required"]; isRequired { + if len(requiredOption.customErrorMessage) > 0 { + return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}} + } + return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}} + } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { + return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}} + } + // not required and empty is valid + return true, nil +} + +func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) { + if !v.IsValid() { + return false, nil + } + + tag := t.Tag.Get(tagName) + + // checks if the field should be ignored + switch tag { + case "": + if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { + if !fieldsRequiredByDefault { + return true, nil + } + return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}} + } + case "-": + return true, nil + } + + isRootType := false + if options == nil { + isRootType = true + options = parseTagIntoMap(tag) + } + + if isEmptyValue(v) { + // an empty value is not validated, checks only required + isValid, resultErr = checkRequired(v, t, options) + for key := range options { + delete(options, key) + } + return isValid, resultErr + } + + var 
customTypeErrors Errors + optionsOrder := options.orderedKeys() + for _, validatorName := range optionsOrder { + validatorStruct := options[validatorName] + if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok { + delete(options, validatorName) + + if result := validatefunc(v.Interface(), o.Interface()); !result { + if len(validatorStruct.customErrorMessage) > 0 { + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) + continue + } + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) + } + } + } + + if len(customTypeErrors.Errors()) > 0 { + return false, customTypeErrors + } + + if isRootType { + // Ensure that we've checked the value by all specified validators before report that the value is valid + defer func() { + delete(options, "optional") + delete(options, "required") + + if isValid && resultErr == nil && len(options) != 0 { + optionsOrder := options.orderedKeys() + for _, validator := range optionsOrder { + isValid = false + resultErr = Error{t.Name, fmt.Errorf( + "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}} + return + } + } + }() + } + + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // checks whether the tag looks like '!something' or 'something' + if validator[0] == '!' { + validator = validator[1:] + negate = true + } + + // checks for interface param validators + for key, value := range InterfaceParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := InterfaceParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + field := fmt.Sprint(v) + if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + } + } + + switch v.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, + reflect.String: + // for each tag option checks the map of validator functions + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // checks whether the tag looks like '!something' or 'something' + if validator[0] == '!' 
{ + validator = validator[1:] + negate = true + } + + // checks for param validators + for key, value := range ParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := ParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + // type not yet supported, fail + return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} + } + } + + if validatefunc, ok := TagMap[validator]; ok { + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field); !result && !negate || result && negate { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + //Not Yet Supported Types (Fail here!) 
+				err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v)
+				return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}}
+			}
+		}
+	}
+		return true, nil
+	case reflect.Map:
+		if v.Type().Key().Kind() != reflect.String {
+			return false, &UnsupportedTypeError{v.Type()}
+		}
+		sv := stringValues(v.MapKeys())
+		sort.Sort(sv)
+		result := true
+		for i, k := range sv {
+			var resultItem bool
+			var err error
+			if v.MapIndex(k).Kind() != reflect.Struct {
+				resultItem, err = typeCheck(v.MapIndex(k), t, o, options)
+				if err != nil {
+					return false, err
+				}
+			} else {
+				resultItem, err = ValidateStruct(v.MapIndex(k).Interface())
+				if err != nil {
+					err = prependPathToErrors(err, t.Name+"."+sv[i].Interface().(string))
+					return false, err
+				}
+			}
+			result = result && resultItem
+		}
+		return result, nil
+	case reflect.Slice, reflect.Array:
+		result := true
+		for i := 0; i < v.Len(); i++ {
+			var resultItem bool
+			var err error
+			if v.Index(i).Kind() != reflect.Struct {
+				resultItem, err = typeCheck(v.Index(i), t, o, options)
+				if err != nil {
+					return false, err
+				}
+			} else {
+				resultItem, err = ValidateStruct(v.Index(i).Interface())
+				if err != nil {
+					err = prependPathToErrors(err, t.Name+"."+strconv.Itoa(i))
+					return false, err
+				}
+			}
+			result = result && resultItem
+		}
+		return result, nil
+	case reflect.Interface:
+		// If the value is an interface then validate its element
+		if v.IsNil() {
+			return true, nil
+		}
+		return ValidateStruct(v.Interface())
+	case reflect.Ptr:
+		// If the value is a pointer then check its element
+		if v.IsNil() {
+			return true, nil
+		}
+		return typeCheck(v.Elem(), t, o, options)
+	case reflect.Struct:
+		return true, nil
+	default:
+		return false, &UnsupportedTypeError{v.Type()}
+	}
+}
+
+func stripParams(validatorString string) string {
+	return paramsRegexp.ReplaceAllString(validatorString, "")
+}
+
+// isEmptyValue checks whether a value is empty or not
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.String, reflect.Array:
+		return v.Len() == 0
+	case reflect.Map, reflect.Slice:
+		return v.Len() == 0 || v.IsNil()
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+
+	return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface())
+}
+
+// ErrorByField returns the error for a specified field of the struct
+// validated by ValidateStruct, or an empty string if there are no errors
+// or the field doesn't exist or doesn't have any errors.
+func ErrorByField(e error, field string) string {
+	if e == nil {
+		return ""
+	}
+	return ErrorsByField(e)[field]
+}
+
+// ErrorsByField returns a map of errors of the struct validated
+// by ValidateStruct, or an empty map if there are no errors.
+func ErrorsByField(e error) map[string]string { + m := make(map[string]string) + if e == nil { + return m + } + // prototype for ValidateStruct + + switch e := e.(type) { + case Error: + m[e.Name] = e.Err.Error() + case Errors: + for _, item := range e.Errors() { + n := ErrorsByField(item) + for k, v := range n { + m[k] = v + } + } + } + + return m +} + +// Error returns string equivalent for reflect.Type +func (e *UnsupportedTypeError) Error() string { + return "validator: unsupported type: " + e.Type.String() +} + +func (sv stringValues) Len() int { return len(sv) } +func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } +func (sv stringValues) get(i int) string { return sv[i].String() } + +func IsE164(str string) bool { + return rxE164.MatchString(str) +} diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml new file mode 100644 index 00000000000..bc5f7b0864b --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/wercker.yml @@ -0,0 +1,15 @@ +box: golang +build: + steps: + - setup-go-workspace + + - script: + name: go get + code: | + go version + go get -t ./... + + - script: + name: go test + code: | + go test -race -v ./... diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml new file mode 100644 index 00000000000..102fb9a691b --- /dev/null +++ b/vendor/github.com/blang/semver/.travis.yml @@ -0,0 +1,21 @@ +language: go +matrix: + include: + - go: 1.4.3 + - go: 1.5.4 + - go: 1.6.3 + - go: 1.7 + - go: tip + allow_failures: + - go: tip +install: +- go get golang.org/x/tools/cmd/cover +- go get github.com/mattn/goveralls +script: +- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci + -repotoken $COVERALLS_TOKEN +- echo "Build examples" ; cd examples && go build +- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .) +env: + global: + secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw= diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE new file mode 100644 index 00000000000..5ba5c86fcb0 --- /dev/null +++ b/vendor/github.com/blang/semver/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Benedikt Lang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+
diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md
new file mode 100644
index 00000000000..08b2e4a3d76
--- /dev/null
+++ b/vendor/github.com/blang/semver/README.md
@@ -0,0 +1,194 @@
+semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
+======
+
+semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
+
+Usage
+-----
+```bash
+$ go get github.com/blang/semver
+```
+Note: Always vendor your dependencies or pin a specific version tag.
+
+```go
+import "github.com/blang/semver"
+
+v1, err := semver.Make("1.0.0-beta")
+v2, err := semver.Make("2.0.0-beta")
+v1.Compare(v2)
+```
+
+Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
+
+Why should I use this lib?
+-----
+
+- Fully spec compatible
+- No reflection
+- No regex
+- Fully tested (Coverage >99%)
+- Readable parsing/validation errors
+- Fast (See [Benchmarks](#benchmarks))
+- Only Stdlib
+- Uses values instead of pointers
+- Many features, see below
+
+
+Features
+-----
+
+- Parsing and validation at all levels
+- Comparator-like comparisons
+- Compare Helper Methods
+- InPlace manipulation
+- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
+- Wildcards `>=1.x`, `<=2.5.x`
+- Sortable (implements sort.Interface)
+- database/sql compatible (sql.Scanner/Valuer)
+- encoding/json compatible (json.Marshaler/Unmarshaler)
+
+Ranges
+------
+
+A `Range` is a set of conditions which specify which versions satisfy the range.
+
+A condition is composed of an operator and a version. The supported operators are:
+
+- `<1.0.0` Less than `1.0.0`
+- `<=1.0.0` Less than or equal to `1.0.0`
+- `>1.0.0` Greater than `1.0.0`
+- `>=1.0.0` Greater than or equal to `1.0.0`
+- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
+- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
+
+Note that spaces between the operator and the version will be gracefully tolerated.
+
+A `Range` can combine multiple conditions separated by spaces:
+
+Ranges can be linked by logical AND:
+
+  - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
+  - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
+
+Ranges can also be linked by logical OR:
+
+  - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
+
+AND has a higher precedence than OR. It's not possible to use brackets.
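+
+Since brackets aren't supported in the range syntax, bracket-like grouping can be emulated in code — a sketch using the `Range.AND`/`Range.OR` combinators defined in `range.go`:
+
+```go
+stable := semver.MustParseRange(">=1.0.0 <2.0.0")
+backport := semver.MustParseRange(">=0.9.5 <0.10.0")
+allowed := stable.OR(backport) // (>=1.0.0 <2.0.0) || (>=0.9.5 <0.10.0)
+allowed(semver.MustParse("0.9.7")) // true
+```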
+
+Ranges can be combined by both AND and OR
+
+  - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
+
+Range usage (note that `range` is a reserved word in Go, so the parsed range needs another name):
+
+```go
+v, err := semver.Parse("1.2.3")
+expectedRange, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
+if expectedRange(v) {
+	// valid
+}
+```
+
+Example
+-----
+
+Have a look at full examples in [examples/main.go](examples/main.go)
+
+```go
+import "github.com/blang/semver"
+
+v, err := semver.Make("0.0.1-alpha.preview+123.github")
+fmt.Printf("Major: %d\n", v.Major)
+fmt.Printf("Minor: %d\n", v.Minor)
+fmt.Printf("Patch: %d\n", v.Patch)
+fmt.Printf("Pre: %s\n", v.Pre)
+fmt.Printf("Build: %s\n", v.Build)
+
+// Prerelease versions array
+if len(v.Pre) > 0 {
+	fmt.Println("Prerelease versions:")
+	for i, pre := range v.Pre {
+		fmt.Printf("%d: %q\n", i, pre)
+	}
+}
+
+// Build meta data array
+if len(v.Build) > 0 {
+	fmt.Println("Build meta data:")
+	for i, build := range v.Build {
+		fmt.Printf("%d: %q\n", i, build)
+	}
+}
+
+v001, err := semver.Make("0.0.1")
+// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
+v001.GT(v) == true
+v.LT(v001) == true
+v.GTE(v) == true
+v.LTE(v) == true
+
+// Or use v.Compare(v2) for comparisons (-1, 0, 1):
+v001.Compare(v) == 1
+v.Compare(v001) == -1
+v.Compare(v) == 0
+
+// Manipulate Version in place:
+v.Pre[0], err = semver.NewPRVersion("beta")
+if err != nil {
+	fmt.Printf("Error parsing pre release version: %q", err)
+}
+
+fmt.Println("\nValidate versions:")
+v.Build[0] = "?"
+
+err = v.Validate()
+if err != nil {
+	fmt.Printf("Validation failed: %s\n", err)
+}
+```
+
+
+Benchmarks
+-----
+
+    BenchmarkParseSimple-4         5000000     390 ns/op     48 B/op   1 allocs/op
+    BenchmarkParseComplex-4        1000000    1813 ns/op    256 B/op   7 allocs/op
+    BenchmarkParseAverage-4        1000000    1171 ns/op    163 B/op   4 allocs/op
+    BenchmarkStringSimple-4       20000000     119 ns/op     16 B/op   1 allocs/op
+    BenchmarkStringLarger-4       10000000     206 ns/op     32 B/op   2 allocs/op
+    BenchmarkStringComplex-4       5000000     324 ns/op     80 B/op   3 allocs/op
+    BenchmarkStringAverage-4       5000000     273 ns/op     53 B/op   2 allocs/op
+    BenchmarkValidateSimple-4    200000000    9.33 ns/op      0 B/op   0 allocs/op
+    BenchmarkValidateComplex-4     3000000     469 ns/op      0 B/op   0 allocs/op
+    BenchmarkValidateAverage-4     5000000     256 ns/op      0 B/op   0 allocs/op
+    BenchmarkCompareSimple-4     100000000    11.8 ns/op      0 B/op   0 allocs/op
+    BenchmarkCompareComplex-4     50000000    30.8 ns/op      0 B/op   0 allocs/op
+    BenchmarkCompareAverage-4     30000000    41.5 ns/op      0 B/op   0 allocs/op
+    BenchmarkSort-4                3000000     419 ns/op    256 B/op   2 allocs/op
+    BenchmarkRangeParseSimple-4    2000000     850 ns/op    192 B/op   5 allocs/op
+    BenchmarkRangeParseAverage-4   1000000    1677 ns/op    400 B/op  10 allocs/op
+    BenchmarkRangeParseComplex-4    300000    5214 ns/op   1440 B/op  30 allocs/op
+    BenchmarkRangeMatchSimple-4   50000000    25.6 ns/op      0 B/op   0 allocs/op
+    BenchmarkRangeMatchAverage-4  30000000    56.4 ns/op      0 B/op   0 allocs/op
+    BenchmarkRangeMatchComplex-4  10000000     153 ns/op      0 B/op   0 allocs/op
+
+See benchmark cases at [semver_test.go](semver_test.go)
+
+
+Motivation
+-----
+
+I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.
+
+
+Contribution
+-----
+
+Feel free to make a pull request. For bigger changes, create an issue first to discuss it.
+
+
+License
+-----
+
+See [LICENSE](LICENSE) file.
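+
+JSON
+-----
+
+`Version` implements `json.Marshaler` and `json.Unmarshaler` (see [json.go](json.go)), so versions round-trip as plain JSON strings. A minimal sketch:
+
+```go
+type Release struct {
+	Version semver.Version `json:"version"`
+}
+
+b, _ := json.Marshal(Release{Version: semver.MustParse("1.2.3")})
+// string(b) == `{"version":"1.2.3"}`
+
+var r Release
+_ = json.Unmarshal(b, &r) // r.Version now equals 1.2.3
+```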
diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go new file mode 100644 index 00000000000..a74bf7c4494 --- /dev/null +++ b/vendor/github.com/blang/semver/json.go @@ -0,0 +1,23 @@ +package semver + +import ( + "encoding/json" +) + +// MarshalJSON implements the encoding/json.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. +func (v *Version) UnmarshalJSON(data []byte) (err error) { + var versionString string + + if err = json.Unmarshal(data, &versionString); err != nil { + return + } + + *v, err = Parse(versionString) + + return +} diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json new file mode 100644 index 00000000000..1cf8ebdd9c1 --- /dev/null +++ b/vendor/github.com/blang/semver/package.json @@ -0,0 +1,17 @@ +{ + "author": "blang", + "bugs": { + "URL": "https://github.com/blang/semver/issues", + "url": "https://github.com/blang/semver/issues" + }, + "gx": { + "dvcsimport": "github.com/blang/semver" + }, + "gxVersion": "0.10.0", + "language": "go", + "license": "MIT", + "name": "semver", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "3.5.1" +} + diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go new file mode 100644 index 00000000000..fca406d4793 --- /dev/null +++ b/vendor/github.com/blang/semver/range.go @@ -0,0 +1,416 @@ +package semver + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +type wildcardType int + +const ( + noneWildcard wildcardType = iota + majorWildcard wildcardType = 1 + minorWildcard wildcardType = 2 + patchWildcard wildcardType = 3 +) + +func wildcardTypefromInt(i int) wildcardType { + switch i { + case 1: + return majorWildcard + case 2: + return minorWildcard + case 3: + return patchWildcard + default: + return noneWildcard + } +} + +type comparator func(Version, Version) bool + +var ( + compEQ comparator = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 0 + } + compNE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) != 0 + } + compGT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 1 + } + compGE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) >= 0 + } + compLT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == -1 + } + compLE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) <= 0 + } +) + +type versionRange struct { + v Version + c comparator +} + +// rangeFunc creates a Range from the given versionRange. +func (vr *versionRange) rangeFunc() Range { + return Range(func(v Version) bool { + return vr.c(v, vr.v) + }) +} + +// Range represents a range of versions. +// A Range can be used to check if a Version satisfies it: +// +// range, err := semver.ParseRange(">1.0.0 <2.0.0") +// range(semver.MustParse("1.1.1") // returns true +type Range func(Version) bool + +// OR combines the existing Range with another Range using logical OR. +func (rf Range) OR(f Range) Range { + return Range(func(v Version) bool { + return rf(v) || f(v) + }) +} + +// AND combines the existing Range with another Range using logical AND. +func (rf Range) AND(f Range) Range { + return Range(func(v Version) bool { + return rf(v) && f(v) + }) +} + +// ParseRange parses a range and returns a Range. +// If the range could not be parsed an error is returned. 
+// +// Valid ranges are: +// - "<1.0.0" +// - "<=1.0.0" +// - ">1.0.0" +// - ">=1.0.0" +// - "1.0.0", "=1.0.0", "==1.0.0" +// - "!1.0.0", "!=1.0.0" +// +// A Range can consist of multiple ranges separated by space: +// Ranges can be linked by logical AND: +// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" +// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 +// +// Ranges can also be linked by logical OR: +// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" +// +// AND has a higher precedence than OR. It's not possible to use brackets. +// +// Ranges can be combined by both AND and OR +// +// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` +func ParseRange(s string) (Range, error) { + parts := splitAndTrim(s) + orParts, err := splitORParts(parts) + if err != nil { + return nil, err + } + expandedParts, err := expandWildcardVersion(orParts) + if err != nil { + return nil, err + } + var orFn Range + for _, p := range expandedParts { + var andFn Range + for _, ap := range p { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + vr, err := buildVersionRange(opStr, vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) + } + rf := vr.rangeFunc() + + // Set function + if andFn == nil { + andFn = rf + } else { // Combine with existing function + andFn = andFn.AND(rf) + } + } + if orFn == nil { + orFn = andFn + } else { + orFn = orFn.OR(andFn) + } + + } + return orFn, nil +} + +// splitORParts splits the already cleaned parts by '||'. +// Checks for invalid positions of the operator and returns an +// error if found. +func splitORParts(parts []string) ([][]string, error) { + var ORparts [][]string + last := 0 + for i, p := range parts { + if p == "||" { + if i == 0 { + return nil, fmt.Errorf("First element in range is '||'") + } + ORparts = append(ORparts, parts[last:i]) + last = i + 1 + } + } + if last == len(parts) { + return nil, fmt.Errorf("Last element in range is '||'") + } + ORparts = append(ORparts, parts[last:]) + return ORparts, nil +} + +// buildVersionRange takes a slice of 2: operator and version +// and builds a versionRange, otherwise an error. 
+func buildVersionRange(opStr, vStr string) (*versionRange, error) {
+	c := parseComparator(opStr)
+	if c == nil {
+		return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
+	}
+	v, err := Parse(vStr)
+	if err != nil {
+		return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
+	}
+
+	return &versionRange{
+		v: v,
+		c: c,
+	}, nil
+}
+
+// inArray checks if a byte is contained in an array of bytes
+func inArray(s byte, list []byte) bool {
+	for _, el := range list {
+		if el == s {
+			return true
+		}
+	}
+	return false
+}
+
+// splitAndTrim splits a range string by spaces and trims whitespace
+func splitAndTrim(s string) (result []string) {
+	last := 0
+	var lastChar byte
+	excludeFromSplit := []byte{'>', '<', '='}
+	for i := 0; i < len(s); i++ {
+		if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
+			if last < i-1 {
+				result = append(result, s[last:i])
+			}
+			last = i + 1
+		} else if s[i] != ' ' {
+			lastChar = s[i]
+		}
+	}
+	if last < len(s)-1 {
+		result = append(result, s[last:])
+	}
+
+	for i, v := range result {
+		result[i] = strings.Replace(v, " ", "", -1)
+	}
+
+	return
+}
+
+// splitComparatorVersion splits the comparator from the version.
+// Input must be free of leading or trailing spaces.
+func splitComparatorVersion(s string) (string, string, error) {
+	i := strings.IndexFunc(s, unicode.IsDigit)
+	if i == -1 {
+		return "", "", fmt.Errorf("Could not get version from string: %q", s)
+	}
+	return strings.TrimSpace(s[0:i]), s[i:], nil
+}
+
+// getWildcardType will return the type of wildcard that the
+// passed version contains
+func getWildcardType(vStr string) wildcardType {
+	parts := strings.Split(vStr, ".")
+	nparts := len(parts)
+	wildcard := parts[nparts-1]
+
+	possibleWildcardType := wildcardTypefromInt(nparts)
+	if wildcard == "x" {
+		return possibleWildcardType
+	}
+
+	return noneWildcard
+}
+
+// createVersionFromWildcard will convert a wildcard version
+// into a regular version, replacing 'x's with '0's, handling
+// special cases like '1.x.x' and '1.x'
+func createVersionFromWildcard(vStr string) string {
+	// handle 1.x.x
+	vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
+	vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
+	parts := strings.Split(vStr2, ".")
+
+	// handle 1.x
+	if len(parts) == 2 {
+		return vStr2 + ".0"
+	}
+
+	return vStr2
+}
+
+// incrementMajorVersion will increment the major version
+// of the passed version
+func incrementMajorVersion(vStr string) (string, error) {
+	parts := strings.Split(vStr, ".")
+	i, err := strconv.Atoi(parts[0])
+	if err != nil {
+		return "", err
+	}
+	parts[0] = strconv.Itoa(i + 1)
+
+	return strings.Join(parts, "."), nil
+}
+
+// incrementMinorVersion will increment the minor version
+// of the passed version
+func incrementMinorVersion(vStr string) (string, error) {
+	parts := strings.Split(vStr, ".")
+	i, err := strconv.Atoi(parts[1])
+	if err != nil {
+		return "", err
+	}
+	parts[1] = strconv.Itoa(i + 1)
+
+	return strings.Join(parts, "."), nil
+}
+
+// expandWildcardVersion will expand wildcards inside versions
+// following these rules:
+//
+// * when dealing with patch wildcards:
+// >= 1.2.x    will become    >= 1.2.0
+// <= 1.2.x    will become    < 1.3.0
+// > 1.2.x     will become    >= 1.3.0
+// < 1.2.x     will become    < 1.2.0
+// != 1.2.x    will become    < 1.2.0 >= 1.3.0
+//
+// * when dealing with minor wildcards:
+// >= 1.x      will become    >= 1.0.0
+// <= 1.x      will become    < 2.0.0
+// > 1.x       will become    >= 2.0.0
+// < 1.x       will become    < 1.0.0
+// != 1.x      will become    < 1.0.0 >= 2.0.0
+//
+// * when dealing with wildcards without
+// version operator:
+// 1.2.x       will become    >= 1.2.0 < 1.3.0
+// 1.x         will become    >= 1.0.0 < 2.0.0
+func expandWildcardVersion(parts [][]string) ([][]string, error) {
+	var expandedParts [][]string
+	for _, p := range parts {
+		var newParts []string
+		for _, ap := range p {
+			if strings.Contains(ap, "x") {
+				opStr, vStr, err := splitComparatorVersion(ap)
+				if err != nil {
+					return nil, err
+				}
+
+				versionWildcardType := getWildcardType(vStr)
+				flatVersion := createVersionFromWildcard(vStr)
+
+				var resultOperator string
+				var shouldIncrementVersion bool
+				switch opStr {
+				case ">":
+					resultOperator = ">="
+					shouldIncrementVersion = true
+				case ">=":
+					resultOperator = ">="
+				case "<":
+					resultOperator = "<"
+				case "<=":
+					resultOperator = "<"
+					shouldIncrementVersion = true
+				case "", "=", "==":
+					newParts = append(newParts, ">="+flatVersion)
+					resultOperator = "<"
+					shouldIncrementVersion = true
+				case "!=", "!":
+					newParts = append(newParts, "<"+flatVersion)
+					resultOperator = ">="
+					shouldIncrementVersion = true
+				}
+
+				var resultVersion string
+				if shouldIncrementVersion {
+					switch versionWildcardType {
+					case patchWildcard:
+						resultVersion, _ = incrementMinorVersion(flatVersion)
+					case minorWildcard:
+						resultVersion, _ = incrementMajorVersion(flatVersion)
+					}
+				} else {
+					resultVersion = flatVersion
+				}
+
+				ap = resultOperator + resultVersion
+			}
+			newParts = append(newParts, ap)
+		}
+		expandedParts = append(expandedParts, newParts)
+	}
+
+	return expandedParts, nil
+}
+
+func parseComparator(s string) comparator {
+	switch s {
+	case "==", "", "=":
+		return compEQ
+	case ">":
+		return compGT
+	case ">=":
+		return compGE
+	case "<":
+		return compLT
+	case "<=":
+		return compLE
+	case "!", "!=":
+		return compNE
+	}
+
+	return nil
+}
+
+// MustParseRange is like ParseRange but panics if the range cannot be parsed.
+func MustParseRange(s string) Range {
+	r, err := ParseRange(s)
+	if err != nil {
+		panic(`semver: ParseRange(` + s + `): ` + err.Error())
+	}
+	return r
+}
diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go
new file mode 100644
index 00000000000..8ee0842e6ac
--- /dev/null
+++ b/vendor/github.com/blang/semver/semver.go
@@ -0,0 +1,418 @@
+package semver
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+const (
+	numbers  string = "0123456789"
+	alphas          = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
+	alphanum        = alphas + numbers
+)
+
+// SpecVersion is the latest fully supported spec version of semver
+var SpecVersion = Version{
+	Major: 2,
+	Minor: 0,
+	Patch: 0,
+}
+
+// Version represents a semver compatible version
+type Version struct {
+	Major uint64
+	Minor uint64
+	Patch uint64
+	Pre   []PRVersion
+	Build []string // no precedence
+}
+
+// String converts a Version to its canonical string form
+func (v Version) String() string {
+	b := make([]byte, 0, 5)
+	b = strconv.AppendUint(b, v.Major, 10)
+	b = append(b, '.')
+	b = strconv.AppendUint(b, v.Minor, 10)
+	b = append(b, '.')
+	b = strconv.AppendUint(b, v.Patch, 10)
+
+	if len(v.Pre) > 0 {
+		b = append(b, '-')
+		b = append(b, v.Pre[0].String()...)
+
+		for _, pre := range v.Pre[1:] {
+			b = append(b, '.')
+			b = append(b, pre.String()...)
+		}
+	}
+
+	if len(v.Build) > 0 {
+		b = append(b, '+')
+		b = append(b, v.Build[0]...)
+
+		for _, build := range v.Build[1:] {
+			b = append(b, '.')
+			b = append(b, build...)
+		}
+	}
+
+	return string(b)
+}
+
+// Equals checks if v is equal to o.
+func (v Version) Equals(o Version) bool {
+	return (v.Compare(o) == 0)
+}
+
+// EQ checks if v is equal to o.
+func (v Version) EQ(o Version) bool {
+	return (v.Compare(o) == 0)
+}
+
+// NE checks if v is not equal to o.
+func (v Version) NE(o Version) bool {
+	return (v.Compare(o) != 0)
+}
+
+// GT checks if v is greater than o.
+func (v Version) GT(o Version) bool {
+	return (v.Compare(o) == 1)
+}
+
+// GTE checks if v is greater than or equal to o.
+func (v Version) GTE(o Version) bool {
+	return (v.Compare(o) >= 0)
+}
+
+// GE checks if v is greater than or equal to o.
+func (v Version) GE(o Version) bool {
+	return (v.Compare(o) >= 0)
+}
+
+// LT checks if v is less than o.
+func (v Version) LT(o Version) bool {
+	return (v.Compare(o) == -1)
+}
+
+// LTE checks if v is less than or equal to o.
+func (v Version) LTE(o Version) bool {
+	return (v.Compare(o) <= 0)
+}
+
+// LE checks if v is less than or equal to o.
+func (v Version) LE(o Version) bool {
+	return (v.Compare(o) <= 0)
+}
+
+// Compare compares Versions v to o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v Version) Compare(o Version) int {
+	if v.Major != o.Major {
+		if v.Major > o.Major {
+			return 1
+		}
+		return -1
+	}
+	if v.Minor != o.Minor {
+		if v.Minor > o.Minor {
+			return 1
+		}
+		return -1
+	}
+	if v.Patch != o.Patch {
+		if v.Patch > o.Patch {
+			return 1
+		}
+		return -1
+	}
+
+	// Quick comparison if a version has no prerelease versions
+	if len(v.Pre) == 0 && len(o.Pre) == 0 {
+		return 0
+	} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
+		return 1
+	} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
+		return -1
+	}
+
+	i := 0
+	for ; i < len(v.Pre) && i < len(o.Pre); i++ {
+		if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
+			continue
+		} else if comp == 1 {
+			return 1
+		} else {
+			return -1
+		}
+	}
+
+	// If all compared prerelease identifiers are equal, the version with
+	// additional prerelease identifiers is the greater one
+	if i == len(v.Pre) && i == len(o.Pre) {
+		return 0
+	} else if i == len(v.Pre) && i < len(o.Pre) {
+		return -1
+	} else {
+		return 1
+	}
+
+}
+
+// Validate validates v and returns an error in case of an invalid version
+func (v Version) Validate() error {
+	// Major, Minor, Patch already validated using uint64
+
+	for _, pre := range v.Pre {
+		if !pre.IsNum { // numeric prerelease versions are already validated as uint64
+			if len(pre.VersionStr) == 0 {
+				return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
+			}
+			if !containsOnly(pre.VersionStr, alphanum) {
+				return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
+			}
+		}
+	}
+
+	for _, build := range v.Build {
+		if len(build) == 0 {
+			return fmt.Errorf("Build meta data can not be empty %q", build)
+		}
+		if !containsOnly(build, alphanum) {
+			return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
+		}
+	}
+
+	return nil
+}
+
+// New is an alias for Parse that returns a pointer: it parses the version string and returns a validated Version or an error
+func New(s string) (vp *Version, err error) {
+	v, err := Parse(s)
+	vp = &v
+	return
+}
+
+// Make is an alias for Parse: it parses the version string and returns a validated Version or an error
+func Make(s string) (Version, error) {
+	return Parse(s)
+}
+
+// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
+// specs to be parsed by this library. It does so by normalizing versions before passing them to
+// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
+// with only major and minor components specified.
+func ParseTolerant(s string) (Version, error) {
+	s = strings.TrimSpace(s)
+	s = strings.TrimPrefix(s, "v")
+
+	// Split into major.minor.(patch+pr+meta)
+	parts := strings.SplitN(s, ".", 3)
+	if len(parts) < 3 {
+		if strings.ContainsAny(parts[len(parts)-1], "+-") {
+			return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
+		}
+		for len(parts) < 3 {
+			parts = append(parts, "0")
+		}
+		s = strings.Join(parts, ".")
+	}
+
+	return Parse(s)
+}
+
+// Parse parses a version string and returns a validated Version or an error
+func Parse(s string) (Version, error) {
+	if len(s) == 0 {
+		return Version{}, errors.New("Version string empty")
+	}
+
+	// Split into major.minor.(patch+pr+meta)
+	parts := strings.SplitN(s, ".", 3)
+	if len(parts) != 3 {
+		return Version{}, errors.New("No Major.Minor.Patch elements found")
+	}
+
+	// Major
+	if !containsOnly(parts[0], numbers) {
+		return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
+	}
+	if hasLeadingZeroes(parts[0]) {
+		return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
+	}
+	major, err := strconv.ParseUint(parts[0], 10, 64)
+	if err != nil {
+		return Version{}, err
+	}
+
+	// Minor
+	if !containsOnly(parts[1], numbers) {
+		return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
+	}
+	if hasLeadingZeroes(parts[1]) {
+		return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
+	}
+	minor, err := strconv.ParseUint(parts[1], 10, 64)
+	if err != nil {
+		return Version{}, err
+	}
+
+	v := Version{}
+	v.Major = major
+	v.Minor = minor
+
+	var build, prerelease []string
+	patchStr := parts[2]
+
+	if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
+		build = strings.Split(patchStr[buildIndex+1:], ".")
+		patchStr = patchStr[:buildIndex]
+	}
+
+	if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
+		prerelease = strings.Split(patchStr[preIndex+1:], ".")
+		patchStr = patchStr[:preIndex]
+	}
+
+	if !containsOnly(patchStr, numbers) {
+		return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
+	}
+	if hasLeadingZeroes(patchStr) {
+		return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
+	}
+	patch, err := strconv.ParseUint(patchStr, 10, 64)
+	if err != nil {
+		return Version{}, err
+	}
+
+	v.Patch = patch
+
+	// Prerelease
+	for _, prstr := range prerelease {
+		parsedPR, err := NewPRVersion(prstr)
+		if err != nil {
+			return Version{}, err
+		}
+		v.Pre = append(v.Pre, parsedPR)
+	}
+
+	// Build meta data
+	for _, str := range build {
+		if len(str) == 0 {
+			return Version{}, errors.New("Build meta data is empty")
+		}
+		if !containsOnly(str, alphanum) {
+			return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
+		}
+		v.Build = append(v.Build, str)
+	}
+
+	return v, nil
+}
+
+// MustParse is like Parse but panics if the version cannot be parsed.
+func MustParse(s string) Version { + v, err := Parse(s) + if err != nil { + panic(`semver: Parse(` + s + `): ` + err.Error()) + } + return v +} + +// PRVersion represents a PreRelease Version +type PRVersion struct { + VersionStr string + VersionNum uint64 + IsNum bool +} + +// NewPRVersion creates a new valid prerelease version +func NewPRVersion(s string) (PRVersion, error) { + if len(s) == 0 { + return PRVersion{}, errors.New("Prerelease is empty") + } + v := PRVersion{} + if containsOnly(s, numbers) { + if hasLeadingZeroes(s) { + return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) + } + num, err := strconv.ParseUint(s, 10, 64) + + // Might never be hit, but just in case + if err != nil { + return PRVersion{}, err + } + v.VersionNum = num + v.IsNum = true + } else if containsOnly(s, alphanum) { + v.VersionStr = s + v.IsNum = false + } else { + return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) + } + return v, nil +} + +// IsNumeric checks if prerelease-version is numeric +func (v PRVersion) IsNumeric() bool { + return v.IsNum +} + +// Compare compares two PreRelease Versions v and o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v PRVersion) Compare(o PRVersion) int { + if v.IsNum && !o.IsNum { + return -1 + } else if !v.IsNum && o.IsNum { + return 1 + } else if v.IsNum && o.IsNum { + if v.VersionNum == o.VersionNum { + return 0 + } else if v.VersionNum > o.VersionNum { + return 1 + } else { + return -1 + } + } else { // both are Alphas + if v.VersionStr == o.VersionStr { + return 0 + } else if v.VersionStr > o.VersionStr { + return 1 + } else { + return -1 + } + } +} + +// PreRelease version to string +func (v PRVersion) String() string { + if v.IsNum { + return strconv.FormatUint(v.VersionNum, 10) + } + return v.VersionStr +} + +func containsOnly(s string, set string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(set, r) + }) == -1 +} + +func hasLeadingZeroes(s string) bool { + return len(s) > 1 && s[0] == '0' +} + +// NewBuildVersion creates a new valid build version +func NewBuildVersion(s string) (string, error) { + if len(s) == 0 { + return "", errors.New("Buildversion is empty") + } + if !containsOnly(s, alphanum) { + return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) + } + return s, nil +} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go new file mode 100644 index 00000000000..e18f880826a --- /dev/null +++ b/vendor/github.com/blang/semver/sort.go @@ -0,0 +1,28 @@ +package semver + +import ( + "sort" +) + +// Versions represents multiple versions. 
+type Versions []Version + +// Len returns length of version collection +func (s Versions) Len() int { + return len(s) +} + +// Swap swaps two versions inside the collection by its indices +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less checks if version at index i is less than version at index j +func (s Versions) Less(i, j int) bool { + return s[i].LT(s[j]) +} + +// Sort sorts a slice of versions +func Sort(versions []Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go new file mode 100644 index 00000000000..eb4d802666e --- /dev/null +++ b/vendor/github.com/blang/semver/sql.go @@ -0,0 +1,30 @@ +package semver + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements the database/sql.Scanner interface. +func (v *Version) Scan(src interface{}) (err error) { + var str string + switch src := src.(type) { + case string: + str = src + case []byte: + str = string(src) + default: + return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) + } + + if t, err := Parse(str); err == nil { + *v = t + } + + return +} + +// Value implements the database/sql/driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/vendor/github.com/cenkalti/backoff/v5/.gitignore b/vendor/github.com/cenkalti/backoff/v5/.gitignore new file mode 100644 index 00000000000..50d95c548b6 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# IDEs +.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md new file mode 100644 index 00000000000..658c37436d9 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md @@ -0,0 +1,29 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [5.0.0] - 2024-12-19 + +### Added + +- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry. + +### Changed + +- Retry function now accepts additional options for specifying max number of tries and max elapsed time. +- Retry function now accepts a context.Context. +- Operation function signature changed to return result (any type) and error. + +### Removed + +- RetryNotify* and RetryWithData functions. Only single Retry function remains. +- Optional arguments from ExponentialBackoff constructor. +- Clock and Timer interfaces. + +### Fixed + +- The original error is returned from Retry if there's a PermanentError. (#144) +- The Retry function respects the wrapped PermanentError. 
(#140) diff --git a/vendor/github.com/cenkalti/backoff/v5/LICENSE b/vendor/github.com/cenkalti/backoff/v5/LICENSE new file mode 100644 index 00000000000..89b81799655 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v5/README.md b/vendor/github.com/cenkalti/backoff/v5/README.md new file mode 100644 index 00000000000..4611b1d1701 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/README.md @@ -0,0 +1,31 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end. + +For most cases, use `Retry` function. See [example_test.go][example] for an example. + +If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed. + +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. +* If proposed change is not a common use case, I will probably not accept it. + +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5 +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go +[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go diff --git a/vendor/github.com/cenkalti/backoff/v5/backoff.go b/vendor/github.com/cenkalti/backoff/v5/backoff.go new file mode 100644 index 00000000000..dd2b24ca735 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. 
+// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // backoff.Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff() + // if duration == backoff.Stop { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. +type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v5/error.go b/vendor/github.com/cenkalti/backoff/v5/error.go new file mode 100644 index 00000000000..beb2b38a23d --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/error.go @@ -0,0 +1,46 @@ +package backoff + +import ( + "fmt" + "time" +) + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} + +// Error returns a string representation of the Permanent error. +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +// Unwrap returns the wrapped error. +func (e *PermanentError) Unwrap() error { + return e.Err +} + +// RetryAfterError signals that the operation should be retried after the given duration. +type RetryAfterError struct { + Duration time.Duration +} + +// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying. +func RetryAfter(seconds int) error { + return &RetryAfterError{Duration: time.Duration(seconds) * time.Second} +} + +// Error returns a string representation of the RetryAfter error. 
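Putting the pieces together: the policies above feed the generic Retry function added later in this diff (retry.go), and the two error types here steer it: Permanent aborts retries, while RetryAfterError overrides the next delay. A minimal sketch, assuming only the APIs vendored in this change:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	attempts := 0
	op := func() (string, error) {
		attempts++
		if attempts < 3 {
			return "", errors.New("transient failure") // retried with growing delays
		}
		return "ok", nil
	}

	// Exponential backoff by default; the options bound tries and total time.
	res, err := backoff.Retry(context.Background(), op,
		backoff.WithMaxTries(5),
		backoff.WithMaxElapsedTime(30*time.Second),
	)
	fmt.Println(res, err) // ok <nil>

	// Wrapping with Permanent stops retrying immediately and returns the
	// original error, per the changelog entry above.
	_, err = backoff.Retry(context.Background(), func() (int, error) {
		return 0, backoff.Permanent(errors.New("bad request"))
	})
	fmt.Println(err) // bad request
}
```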
+func (e *RetryAfterError) Error() string { + return fmt.Sprintf("retry after %s", e.Duration) +} diff --git a/vendor/github.com/cenkalti/backoff/v5/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go new file mode 100644 index 00000000000..79d425e8746 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go @@ -0,0 +1,118 @@ +package backoff + +import ( + "math/rand/v2" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +Example: Given the following default arguments, for 9 tries the sequence will be: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + + currentInterval time.Duration +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + return &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + } +} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval +} + +// NextBackOff calculates the next backoff interval using the formula: +// +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + if b.currentInterval == 0 { + b.currentInterval = b.InitialInterval + } + + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + return next +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. 
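+	// Note: comparing against MaxInterval/Multiplier, rather than multiplying
+	// currentInterval first, means the product is only computed when it is known
+	// to fit, so converting back to time.Duration (an int64) cannot overflow.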
+ if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go new file mode 100644 index 00000000000..32a7f988347 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/retry.go @@ -0,0 +1,139 @@ +package backoff + +import ( + "context" + "errors" + "time" +) + +// DefaultMaxElapsedTime sets a default limit for the total retry duration. +const DefaultMaxElapsedTime = 15 * time.Minute + +// Operation is a function that attempts an operation and may be retried. +type Operation[T any] func() (T, error) + +// Notify is a function called on operation error with the error and backoff duration. +type Notify func(error, time.Duration) + +// retryOptions holds configuration settings for the retry mechanism. +type retryOptions struct { + BackOff BackOff // Strategy for calculating backoff periods. + Timer timer // Timer to manage retry delays. + Notify Notify // Optional function to notify on each retry error. + MaxTries uint // Maximum number of retry attempts. + MaxElapsedTime time.Duration // Maximum total time for all retries. +} + +type RetryOption func(*retryOptions) + +// WithBackOff configures a custom backoff strategy. +func WithBackOff(b BackOff) RetryOption { + return func(args *retryOptions) { + args.BackOff = b + } +} + +// withTimer sets a custom timer for managing delays between retries. +func withTimer(t timer) RetryOption { + return func(args *retryOptions) { + args.Timer = t + } +} + +// WithNotify sets a notification function to handle retry errors. +func WithNotify(n Notify) RetryOption { + return func(args *retryOptions) { + args.Notify = n + } +} + +// WithMaxTries limits the number of all attempts. +func WithMaxTries(n uint) RetryOption { + return func(args *retryOptions) { + args.MaxTries = n + } +} + +// WithMaxElapsedTime limits the total duration for retry attempts. +func WithMaxElapsedTime(d time.Duration) RetryOption { + return func(args *retryOptions) { + args.MaxElapsedTime = d + } +} + +// Retry attempts the operation until success, a permanent error, or backoff completion. +// It ensures the operation is executed at least once. +// +// Returns the operation result or error if retries are exhausted or context is cancelled. +func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) { + // Initialize default retry options. 
+ args := &retryOptions{ + BackOff: NewExponentialBackOff(), + Timer: &defaultTimer{}, + MaxElapsedTime: DefaultMaxElapsedTime, + } + + // Apply user-provided options to the default settings. + for _, opt := range opts { + opt(args) + } + + defer args.Timer.Stop() + + startedAt := time.Now() + args.BackOff.Reset() + for numTries := uint(1); ; numTries++ { + // Execute the operation. + res, err := operation() + if err == nil { + return res, nil + } + + // Stop retrying if maximum tries exceeded. + if args.MaxTries > 0 && numTries >= args.MaxTries { + return res, err + } + + // Handle permanent errors without retrying. + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, permanent.Unwrap() + } + + // Stop retrying if context is cancelled. + if cerr := context.Cause(ctx); cerr != nil { + return res, cerr + } + + // Calculate next backoff duration. + next := args.BackOff.NextBackOff() + if next == Stop { + return res, err + } + + // Reset backoff if RetryAfterError is encountered. + var retryAfter *RetryAfterError + if errors.As(err, &retryAfter) { + next = retryAfter.Duration + args.BackOff.Reset() + } + + // Stop retrying if maximum elapsed time exceeded. + if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime { + return res, err + } + + // Notify on error if a notifier function is provided. + if args.Notify != nil { + args.Notify(err, next) + } + + // Wait for the next backoff period or context cancellation. + args.Timer.Start(next) + select { + case <-args.Timer.C(): + case <-ctx.Done(): + return res, context.Cause(ctx) + } + } +} diff --git a/vendor/github.com/cenkalti/backoff/v5/ticker.go b/vendor/github.com/cenkalti/backoff/v5/ticker.go new file mode 100644 index 00000000000..f0d4b2ae721 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/ticker.go @@ -0,0 +1,83 @@ +package backoff + +import ( + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + timer timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + timer: &defaultTimer{}, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. 
+ return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v5/timer.go b/vendor/github.com/cenkalti/backoff/v5/timer.go new file mode 100644 index 00000000000..a895309747d --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. +func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/digitorus/pkcs7/.gitignore b/vendor/github.com/digitorus/pkcs7/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/digitorus/pkcs7/LICENSE b/vendor/github.com/digitorus/pkcs7/LICENSE new file mode 100644 index 00000000000..75f3209085b --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andrew Smith + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/digitorus/pkcs7/Makefile b/vendor/github.com/digitorus/pkcs7/Makefile new file mode 100644 index 00000000000..07c78e14c04 --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/Makefile @@ -0,0 +1,20 @@ +all: vet staticcheck test + +test: + GODEBUG=x509sha1=1 go test -covermode=count -coverprofile=coverage.out . 
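+
+# Note: GODEBUG=x509sha1=1 opts back into SHA-1 signature verification, which
+# newer Go toolchains disable by default, so the legacy SHA-1 test fixtures
+# keep working.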
+ +showcoverage: test + go tool cover -html=coverage.out + +vet: + go vet . + +lint: + golint . + +staticcheck: + staticcheck . + +gettools: + go get -u honnef.co/go/tools/... + go get -u golang.org/x/lint/golint diff --git a/vendor/github.com/digitorus/pkcs7/README.md b/vendor/github.com/digitorus/pkcs7/README.md new file mode 100644 index 00000000000..a55d117c682 --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/README.md @@ -0,0 +1,69 @@ +# pkcs7 + +[![GoDoc](https://godoc.org/go.mozilla.org/pkcs7?status.svg)](https://godoc.org/go.mozilla.org/pkcs7) +[![Build Status](https://github.com/mozilla-services/pkcs7/workflows/CI/badge.svg?branch=master&event=push)](https://github.com/mozilla-services/pkcs7/actions/workflows/ci.yml?query=branch%3Amaster+event%3Apush) + +pkcs7 implements parsing and creating signed and enveloped messages. + +```go +package main + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "os" + + "go.mozilla.org/pkcs7" +) + +func SignAndDetach(content []byte, cert *x509.Certificate, privkey *rsa.PrivateKey) (signed []byte, err error) { + toBeSigned, err := NewSignedData(content) + if err != nil { + err = fmt.Errorf("Cannot initialize signed data: %s", err) + return + } + if err = toBeSigned.AddSigner(cert, privkey, SignerInfoConfig{}); err != nil { + err = fmt.Errorf("Cannot add signer: %s", err) + return + } + + // Detach signature, omit if you want an embedded signature + toBeSigned.Detach() + + signed, err = toBeSigned.Finish() + if err != nil { + err = fmt.Errorf("Cannot finish signing data: %s", err) + return + } + + // Verify the signature + pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: signed}) + p7, err := pkcs7.Parse(signed) + if err != nil { + err = fmt.Errorf("Cannot parse our signed data: %s", err) + return + } + + // since the signature was detached, reattach the content here + p7.Content = content + + if bytes.Compare(content, p7.Content) != 0 { + err = fmt.Errorf("Our content was not in the parsed data:\n\tExpected: %s\n\tActual: %s", content, p7.Content) + return + } + if err = p7.Verify(); err != nil { + err = fmt.Errorf("Cannot verify our signed data: %s", err) + return + } + + return signed, nil +} +``` + + + +## Credits +This is a fork of [fullsailor/pkcs7](https://github.com/fullsailor/pkcs7) diff --git a/vendor/github.com/digitorus/pkcs7/ber.go b/vendor/github.com/digitorus/pkcs7/ber.go new file mode 100644 index 00000000000..31963b119f7 --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/ber.go @@ -0,0 +1,269 @@ +package pkcs7 + +import ( + "bytes" + "errors" +) + +type asn1Object interface { + EncodeTo(writer *bytes.Buffer) error +} + +type asn1Structured struct { + tagBytes []byte + content []asn1Object +} + +func (s asn1Structured) EncodeTo(out *bytes.Buffer) error { + //fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes) + inner := new(bytes.Buffer) + for _, obj := range s.content { + err := obj.EncodeTo(inner) + if err != nil { + return err + } + } + out.Write(s.tagBytes) + encodeLength(out, inner.Len()) + out.Write(inner.Bytes()) + return nil +} + +type asn1Primitive struct { + tagBytes []byte + length int + content []byte +} + +func (p asn1Primitive) EncodeTo(out *bytes.Buffer) error { + _, err := out.Write(p.tagBytes) + if err != nil { + return err + } + if err = encodeLength(out, p.length); err != nil { + return err + } + //fmt.Printf("%s--> tag: % X length: %d\n", strings.Repeat("| ", encodeIndent), p.tagBytes, p.length) + //fmt.Printf("%s--> content length: %d\n", 
strings.Repeat("| ", encodeIndent), len(p.content)) + out.Write(p.content) + + return nil +} + +func ber2der(ber []byte) ([]byte, error) { + if len(ber) == 0 { + return nil, errors.New("ber2der: input ber is empty") + } + //fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber)) + out := new(bytes.Buffer) + + obj, _, err := readObject(ber, 0) + if err != nil { + return nil, err + } + obj.EncodeTo(out) + + return out.Bytes(), nil +} + +// encodes lengths that are longer than 127 into string of bytes +func marshalLongLength(out *bytes.Buffer, i int) (err error) { + n := lengthLength(i) + + for ; n > 0; n-- { + err = out.WriteByte(byte(i >> uint((n-1)*8))) + if err != nil { + return + } + } + + return nil +} + +// computes the byte length of an encoded length value +func lengthLength(i int) (numBytes int) { + numBytes = 1 + for i > 255 { + numBytes++ + i >>= 8 + } + return +} + +// encodes the length in DER format +// If the length fits in 7 bits, the value is encoded directly. +// +// Otherwise, the number of bytes to encode the length is first determined. +// This number is likely to be 4 or less for a 32bit length. This number is +// added to 0x80. The length is encoded in big endian encoding follow after +// +// Examples: +// length | byte 1 | bytes n +// 0 | 0x00 | - +// 120 | 0x78 | - +// 200 | 0x81 | 0xC8 +// 500 | 0x82 | 0x01 0xF4 +// +func encodeLength(out *bytes.Buffer, length int) (err error) { + if length >= 128 { + l := lengthLength(length) + err = out.WriteByte(0x80 | byte(l)) + if err != nil { + return + } + err = marshalLongLength(out, length) + if err != nil { + return + } + } else { + err = out.WriteByte(byte(length)) + if err != nil { + return + } + } + return +} + +func readObject(ber []byte, offset int) (asn1Object, int, error) { + berLen := len(ber) + if offset >= berLen { + return nil, 0, errors.New("ber2der: offset is after end of ber data") + } + tagStart := offset + b := ber[offset] + offset++ + if offset >= berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + tag := b & 0x1F // last 5 bits + if tag == 0x1F { + tag = 0 + for ber[offset] >= 0x80 { + tag = tag*128 + ber[offset] - 0x80 + offset++ + if offset >= berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + } + // jvehent 20170227: this doesn't appear to be used anywhere... 
+ //tag = tag*128 + ber[offset] - 0x80 + offset++ + if offset >= berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + } + tagEnd := offset + + kind := b & 0x20 + if kind == 0 { + debugprint("--> Primitive\n") + } else { + debugprint("--> Constructed\n") + } + // read length + var length int + l := ber[offset] + offset++ + if l >= 0x80 && offset >= berLen { + // if indefinite or multibyte length, we need to verify there is at least one more byte available + // otherwise we need to be flexible here for length == 0 conditions + // validation that the length is available is done after the length is correctly parsed + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + indefinite := false + if l > 0x80 { + numberOfBytes := (int)(l & 0x7F) + if numberOfBytes > 4 { // int is only guaranteed to be 32bit + return nil, 0, errors.New("ber2der: BER tag length too long") + } + if numberOfBytes == 4 && (int)(ber[offset]) > 0x7F { + return nil, 0, errors.New("ber2der: BER tag length is negative") + } + if offset + numberOfBytes > berLen { + // == condition is not checked here, this allows for a more descreptive error when the parsed length is + // compared with the remaining available bytes (`contentEnd > berLen`) + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + if (int)(ber[offset]) == 0x0 && (numberOfBytes == 1 || ber[offset+1] <= 0x7F) { + // `numberOfBytes == 1` is an important conditional to avoid a potential out of bounds panic with `ber[offset+1]` + return nil, 0, errors.New("ber2der: BER tag length has leading zero") + } + debugprint("--> (compute length) indicator byte: %x\n", l) + //debugprint("--> (compute length) length bytes: %x\n", ber[offset:offset+numberOfBytes]) + for i := 0; i < numberOfBytes; i++ { + length = length*256 + (int)(ber[offset]) + offset++ + } + } else if l == 0x80 { + indefinite = true + } else { + length = (int)(l) + } + if length < 0 { + return nil, 0, errors.New("ber2der: invalid negative value found in BER tag length") + } + //fmt.Printf("--> length : %d\n", length) + contentEnd := offset + length + if contentEnd > berLen { + return nil, 0, errors.New("ber2der: BER tag length is more than available data") + } + debugprint("--> content start : %d\n", offset) + debugprint("--> content end : %d\n", contentEnd) + //debugprint("--> content : %x\n", ber[offset:contentEnd]) + var obj asn1Object + if indefinite && kind == 0 { + return nil, 0, errors.New("ber2der: Indefinite form tag must have constructed encoding") + } + if kind == 0 { + obj = asn1Primitive{ + tagBytes: ber[tagStart:tagEnd], + length: length, + content: ber[offset:contentEnd], + } + } else { + var subObjects []asn1Object + for (offset < contentEnd) || indefinite { + var subObj asn1Object + var err error + subObj, offset, err = readObject(ber, offset) + if err != nil { + return nil, 0, err + } + subObjects = append(subObjects, subObj) + + if indefinite { + terminated, err := isIndefiniteTermination(ber, offset) + if err != nil { + return nil, 0, err + } + + if terminated { + break + } + } + } + obj = asn1Structured{ + tagBytes: ber[tagStart:tagEnd], + content: subObjects, + } + } + + // Apply indefinite form length with 0x0000 terminator. 
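+	// Note: when the loop above ends via isIndefiniteTermination, offset is
+	// parked on the two 0x00 end-of-contents octets, so contentEnd must step
+	// past them before being handed back to the caller.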
+ if indefinite { + contentEnd = offset + 2 + } + + return obj, contentEnd, nil +} + +func isIndefiniteTermination(ber []byte, offset int) (bool, error) { + if len(ber) - offset < 2 { + return false, errors.New("ber2der: Invalid BER format") + } + + return bytes.Index(ber[offset:], []byte{0x0, 0x0}) == 0, nil +} + +func debugprint(format string, a ...interface{}) { + //fmt.Printf(format, a) +} diff --git a/vendor/github.com/digitorus/pkcs7/decrypt.go b/vendor/github.com/digitorus/pkcs7/decrypt.go new file mode 100644 index 00000000000..0d088d6287c --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/decrypt.go @@ -0,0 +1,177 @@ +package pkcs7 + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/asn1" + "errors" + "fmt" +) + +// ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed +var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3, AES-256-CBC and AES-128-GCM supported") + +// ErrNotEncryptedContent is returned when attempting to Decrypt data that is not encrypted data +var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data type") + +// Decrypt decrypts encrypted content info for recipient cert and private key +func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pkey crypto.PrivateKey) ([]byte, error) { + data, ok := p7.raw.(envelopedData) + if !ok { + return nil, ErrNotEncryptedContent + } + recipient := selectRecipientForCertificate(data.RecipientInfos, cert) + if recipient.EncryptedKey == nil { + return nil, errors.New("pkcs7: no enveloped recipient for provided certificate") + } + switch pkey := pkey.(type) { + case *rsa.PrivateKey: + var contentKey []byte + contentKey, err := rsa.DecryptPKCS1v15(rand.Reader, pkey, recipient.EncryptedKey) + if err != nil { + return nil, err + } + return data.EncryptedContentInfo.decrypt(contentKey) + } + return nil, ErrUnsupportedAlgorithm +} + +// DecryptUsingPSK decrypts encrypted data using caller provided +// pre-shared secret +func (p7 *PKCS7) DecryptUsingPSK(key []byte) ([]byte, error) { + data, ok := p7.raw.(encryptedData) + if !ok { + return nil, ErrNotEncryptedContent + } + return data.EncryptedContentInfo.decrypt(key) +} + +func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) { + alg := eci.ContentEncryptionAlgorithm.Algorithm + if !alg.Equal(OIDEncryptionAlgorithmDESCBC) && + !alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC) && + !alg.Equal(OIDEncryptionAlgorithmAES256CBC) && + !alg.Equal(OIDEncryptionAlgorithmAES128CBC) && + !alg.Equal(OIDEncryptionAlgorithmAES128GCM) && + !alg.Equal(OIDEncryptionAlgorithmAES256GCM) { + fmt.Printf("Unsupported Content Encryption Algorithm: %s\n", alg) + return nil, ErrUnsupportedAlgorithm + } + + // EncryptedContent can either be constructed of multple OCTET STRINGs + // or _be_ a tagged OCTET STRING + var cyphertext []byte + if eci.EncryptedContent.IsCompound { + // Complex case to concat all of the children OCTET STRINGs + var buf bytes.Buffer + cypherbytes := eci.EncryptedContent.Bytes + for { + var part []byte + cypherbytes, _ = asn1.Unmarshal(cypherbytes, &part) + buf.Write(part) + if cypherbytes == nil { + break + } + } + cyphertext = buf.Bytes() + } else { + // Simple case, the bytes _are_ the cyphertext + cyphertext = eci.EncryptedContent.Bytes + } + + var block cipher.Block + var err error + + switch { + case alg.Equal(OIDEncryptionAlgorithmDESCBC): + block, err = des.NewCipher(key) + case 
alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC): + block, err = des.NewTripleDESCipher(key) + case alg.Equal(OIDEncryptionAlgorithmAES256CBC), alg.Equal(OIDEncryptionAlgorithmAES256GCM): + fallthrough + case alg.Equal(OIDEncryptionAlgorithmAES128GCM), alg.Equal(OIDEncryptionAlgorithmAES128CBC): + block, err = aes.NewCipher(key) + } + + if err != nil { + return nil, err + } + + if alg.Equal(OIDEncryptionAlgorithmAES128GCM) || alg.Equal(OIDEncryptionAlgorithmAES256GCM) { + params := aesGCMParameters{} + paramBytes := eci.ContentEncryptionAlgorithm.Parameters.Bytes + + _, err := asn1.Unmarshal(paramBytes, ¶ms) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + + if len(params.Nonce) != gcm.NonceSize() { + return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect") + } + if params.ICVLen != gcm.Overhead() { + return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect") + } + + plaintext, err := gcm.Open(nil, params.Nonce, cyphertext, nil) + if err != nil { + return nil, err + } + + return plaintext, nil + } + + iv := eci.ContentEncryptionAlgorithm.Parameters.Bytes + if len(iv) != block.BlockSize() { + return nil, errors.New("pkcs7: encryption algorithm parameters are malformed") + } + mode := cipher.NewCBCDecrypter(block, iv) + plaintext := make([]byte, len(cyphertext)) + mode.CryptBlocks(plaintext, cyphertext) + if plaintext, err = unpad(plaintext, mode.BlockSize()); err != nil { + return nil, err + } + return plaintext, nil +} + +func unpad(data []byte, blocklen int) ([]byte, error) { + if blocklen < 1 { + return nil, fmt.Errorf("invalid blocklen %d", blocklen) + } + if len(data)%blocklen != 0 || len(data) == 0 { + return nil, fmt.Errorf("invalid data len %d", len(data)) + } + + // the last byte is the length of padding + padlen := int(data[len(data)-1]) + + // check padding integrity, all bytes should be the same + pad := data[len(data)-padlen:] + for _, padbyte := range pad { + if padbyte != byte(padlen) { + return nil, errors.New("invalid padding") + } + } + + return data[:len(data)-padlen], nil +} + +func selectRecipientForCertificate(recipients []recipientInfo, cert *x509.Certificate) recipientInfo { + for _, recp := range recipients { + if isCertMatchForIssuerAndSerial(cert, recp.IssuerAndSerialNumber) { + return recp + } + } + return recipientInfo{} +} diff --git a/vendor/github.com/digitorus/pkcs7/encrypt.go b/vendor/github.com/digitorus/pkcs7/encrypt.go new file mode 100644 index 00000000000..6b2655708c6 --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/encrypt.go @@ -0,0 +1,399 @@ +package pkcs7 + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" +) + +type envelopedData struct { + Version int + RecipientInfos []recipientInfo `asn1:"set"` + EncryptedContentInfo encryptedContentInfo +} + +type encryptedData struct { + Version int + EncryptedContentInfo encryptedContentInfo +} + +type recipientInfo struct { + Version int + IssuerAndSerialNumber issuerAndSerial + KeyEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedKey []byte +} + +type encryptedContentInfo struct { + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent asn1.RawValue `asn1:"tag:0,optional"` +} + +const ( + // EncryptionAlgorithmDESCBC is the DES CBC encryption algorithm + EncryptionAlgorithmDESCBC = iota + + // 
EncryptionAlgorithmAES128CBC is the AES 128 bits with CBC encryption algorithm + // Avoid this algorithm unless required for interoperability; use AES GCM instead. + EncryptionAlgorithmAES128CBC + + // EncryptionAlgorithmAES256CBC is the AES 256 bits with CBC encryption algorithm + // Avoid this algorithm unless required for interoperability; use AES GCM instead. + EncryptionAlgorithmAES256CBC + + // EncryptionAlgorithmAES128GCM is the AES 128 bits with GCM encryption algorithm + EncryptionAlgorithmAES128GCM + + // EncryptionAlgorithmAES256GCM is the AES 256 bits with GCM encryption algorithm + EncryptionAlgorithmAES256GCM +) + +// ContentEncryptionAlgorithm determines the algorithm used to encrypt the +// plaintext message. Change the value of this variable to change which +// algorithm is used in the Encrypt() function. +var ContentEncryptionAlgorithm = EncryptionAlgorithmDESCBC + +// ErrUnsupportedEncryptionAlgorithm is returned when attempting to encrypt +// content with an unsupported algorithm. +var ErrUnsupportedEncryptionAlgorithm = errors.New("pkcs7: cannot encrypt content: only DES-CBC, AES-CBC, and AES-GCM supported") + +// ErrPSKNotProvided is returned when attempting to encrypt +// using a PSK without actually providing the PSK. +var ErrPSKNotProvided = errors.New("pkcs7: cannot encrypt content: PSK not provided") + +const nonceSize = 12 + +type aesGCMParameters struct { + Nonce []byte `asn1:"tag:4"` + ICVLen int +} + +func encryptAESGCM(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { + var keyLen int + var algID asn1.ObjectIdentifier + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmAES128GCM: + keyLen = 16 + algID = OIDEncryptionAlgorithmAES128GCM + case EncryptionAlgorithmAES256GCM: + keyLen = 32 + algID = OIDEncryptionAlgorithmAES256GCM + default: + return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESGCM: %d", ContentEncryptionAlgorithm) + } + if key == nil { + // Create AES key + key = make([]byte, keyLen) + + _, err := rand.Read(key) + if err != nil { + return nil, nil, err + } + } + + // Create nonce + nonce := make([]byte, nonceSize) + + _, err := rand.Read(nonce) + if err != nil { + return nil, nil, err + } + + // Encrypt content + block, err := aes.NewCipher(key) + if err != nil { + return nil, nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, nil, err + } + + ciphertext := gcm.Seal(nil, nonce, content, nil) + + // Prepare ASN.1 Encrypted Content Info + paramSeq := aesGCMParameters{ + Nonce: nonce, + ICVLen: gcm.Overhead(), + } + + paramBytes, err := asn1.Marshal(paramSeq) + if err != nil { + return nil, nil, err + } + + eci := encryptedContentInfo{ + ContentType: OIDData, + ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: algID, + Parameters: asn1.RawValue{ + Tag: asn1.TagSequence, + Bytes: paramBytes, + }, + }, + EncryptedContent: marshalEncryptedContent(ciphertext), + } + + return key, &eci, nil +} + +func encryptDESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { + if key == nil { + // Create DES key + key = make([]byte, 8) + + _, err := rand.Read(key) + if err != nil { + return nil, nil, err + } + } + + // Create CBC IV + iv := make([]byte, des.BlockSize) + _, err := rand.Read(iv) + if err != nil { + return nil, nil, err + } + + // Encrypt padded content + block, err := des.NewCipher(key) + if err != nil { + return nil, nil, err + } + mode := cipher.NewCBCEncrypter(block, iv) + plaintext, err := pad(content, mode.BlockSize()) + if 
err != nil { + return nil, nil, err + } + cyphertext := make([]byte, len(plaintext)) + mode.CryptBlocks(cyphertext, plaintext) + + // Prepare ASN.1 Encrypted Content Info + eci := encryptedContentInfo{ + ContentType: OIDData, + ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: OIDEncryptionAlgorithmDESCBC, + Parameters: asn1.RawValue{Tag: 4, Bytes: iv}, + }, + EncryptedContent: marshalEncryptedContent(cyphertext), + } + + return key, &eci, nil +} + +func encryptAESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { + var keyLen int + var algID asn1.ObjectIdentifier + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmAES128CBC: + keyLen = 16 + algID = OIDEncryptionAlgorithmAES128CBC + case EncryptionAlgorithmAES256CBC: + keyLen = 32 + algID = OIDEncryptionAlgorithmAES256CBC + default: + return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESCBC: %d", ContentEncryptionAlgorithm) + } + + if key == nil { + // Create AES key + key = make([]byte, keyLen) + + _, err := rand.Read(key) + if err != nil { + return nil, nil, err + } + } + + // Create CBC IV + iv := make([]byte, aes.BlockSize) + _, err := rand.Read(iv) + if err != nil { + return nil, nil, err + } + + // Encrypt padded content + block, err := aes.NewCipher(key) + if err != nil { + return nil, nil, err + } + mode := cipher.NewCBCEncrypter(block, iv) + plaintext, err := pad(content, mode.BlockSize()) + if err != nil { + return nil, nil, err + } + cyphertext := make([]byte, len(plaintext)) + mode.CryptBlocks(cyphertext, plaintext) + + // Prepare ASN.1 Encrypted Content Info + eci := encryptedContentInfo{ + ContentType: OIDData, + ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: algID, + Parameters: asn1.RawValue{Tag: 4, Bytes: iv}, + }, + EncryptedContent: marshalEncryptedContent(cyphertext), + } + + return key, &eci, nil +} + +// Encrypt creates and returns an envelope data PKCS7 structure with encrypted +// recipient keys for each recipient public key. +// +// The algorithm used to perform encryption is determined by the current value +// of the global ContentEncryptionAlgorithm package variable. By default, the +// value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the +// value before calling Encrypt(). 
For example: +// +// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM +// +// TODO(fullsailor): Add support for encrypting content with other algorithms +func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { + var eci *encryptedContentInfo + var key []byte + var err error + + // Apply chosen symmetric encryption method + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmDESCBC: + key, eci, err = encryptDESCBC(content, nil) + case EncryptionAlgorithmAES128CBC: + fallthrough + case EncryptionAlgorithmAES256CBC: + key, eci, err = encryptAESCBC(content, nil) + case EncryptionAlgorithmAES128GCM: + fallthrough + case EncryptionAlgorithmAES256GCM: + key, eci, err = encryptAESGCM(content, nil) + + default: + return nil, ErrUnsupportedEncryptionAlgorithm + } + + if err != nil { + return nil, err + } + + // Prepare each recipient's encrypted cipher key + recipientInfos := make([]recipientInfo, len(recipients)) + for i, recipient := range recipients { + encrypted, err := encryptKey(key, recipient) + if err != nil { + return nil, err + } + ias, err := cert2issuerAndSerial(recipient) + if err != nil { + return nil, err + } + info := recipientInfo{ + Version: 0, + IssuerAndSerialNumber: ias, + KeyEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: OIDEncryptionAlgorithmRSA, + }, + EncryptedKey: encrypted, + } + recipientInfos[i] = info + } + + // Prepare envelope content + envelope := envelopedData{ + EncryptedContentInfo: *eci, + Version: 0, + RecipientInfos: recipientInfos, + } + innerContent, err := asn1.Marshal(envelope) + if err != nil { + return nil, err + } + + // Prepare outer payload structure + wrapper := contentInfo{ + ContentType: OIDEnvelopedData, + Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent}, + } + + return asn1.Marshal(wrapper) +} + +// EncryptUsingPSK creates and returns an encrypted data PKCS7 structure, +// encrypted using caller provided pre-shared secret. 
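Because the pre-shared-key mode needs no recipient certificates, the full round trip is short. A sketch under the assumption that AES-128-GCM is selected as described above; Parse and DecryptUsingPSK come from pkcs7.go and decrypt.go elsewhere in this diff:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/digitorus/pkcs7"
)

func main() {
	// Choose AES-128-GCM instead of the DES-CBC package default.
	pkcs7.ContentEncryptionAlgorithm = pkcs7.EncryptionAlgorithmAES128GCM

	key := make([]byte, 16) // AES-128 pre-shared key
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	enc, err := pkcs7.EncryptUsingPSK([]byte("attack at dawn"), key)
	if err != nil {
		panic(err)
	}

	p7, err := pkcs7.Parse(enc)
	if err != nil {
		panic(err)
	}
	plain, err := p7.DecryptUsingPSK(key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plain) // attack at dawn
}
```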
+func EncryptUsingPSK(content []byte, key []byte) ([]byte, error) { + var eci *encryptedContentInfo + var err error + + if key == nil { + return nil, ErrPSKNotProvided + } + + // Apply chosen symmetric encryption method + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmDESCBC: + _, eci, err = encryptDESCBC(content, key) + + case EncryptionAlgorithmAES128GCM: + fallthrough + case EncryptionAlgorithmAES256GCM: + _, eci, err = encryptAESGCM(content, key) + + default: + return nil, ErrUnsupportedEncryptionAlgorithm + } + + if err != nil { + return nil, err + } + + // Prepare encrypted-data content + ed := encryptedData{ + Version: 0, + EncryptedContentInfo: *eci, + } + innerContent, err := asn1.Marshal(ed) + if err != nil { + return nil, err + } + + // Prepare outer payload structure + wrapper := contentInfo{ + ContentType: OIDEncryptedData, + Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent}, + } + + return asn1.Marshal(wrapper) +} + +func marshalEncryptedContent(content []byte) asn1.RawValue { + asn1Content, _ := asn1.Marshal(content) + return asn1.RawValue{Tag: 0, Class: 2, Bytes: asn1Content, IsCompound: true} +} + +func encryptKey(key []byte, recipient *x509.Certificate) ([]byte, error) { + if pub := recipient.PublicKey.(*rsa.PublicKey); pub != nil { + return rsa.EncryptPKCS1v15(rand.Reader, pub, key) + } + return nil, ErrUnsupportedAlgorithm +} + +func pad(data []byte, blocklen int) ([]byte, error) { + if blocklen < 1 { + return nil, fmt.Errorf("invalid blocklen %d", blocklen) + } + padlen := blocklen - (len(data) % blocklen) + if padlen == 0 { + padlen = blocklen + } + pad := bytes.Repeat([]byte{byte(padlen)}, padlen) + return append(data, pad...), nil +} diff --git a/vendor/github.com/digitorus/pkcs7/pkcs7.go b/vendor/github.com/digitorus/pkcs7/pkcs7.go new file mode 100644 index 00000000000..aca3c53f432 --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/pkcs7.go @@ -0,0 +1,302 @@ +// Package pkcs7 implements parsing and generation of some PKCS#7 structures. +package pkcs7 + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + "sort" + + _ "crypto/sha1" // for crypto.SHA1 +) + +// PKCS7 Represents a PKCS7 structure +type PKCS7 struct { + Content []byte + Certificates []*x509.Certificate + CRLs []pkix.CertificateList + Signers []signerInfo + raw interface{} +} + +type contentInfo struct { + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"explicit,optional,tag:0"` +} + +// ErrUnsupportedContentType is returned when a PKCS7 content is not supported. 
+// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2), +// and Enveloped Data are supported (1.2.840.113549.1.7.3) +var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type") + +type unsignedData []byte + +var ( + // Signed Data OIDs + OIDData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 1} + OIDSignedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2} + OIDEnvelopedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 3} + OIDEncryptedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 6} + OIDAttributeContentType = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 3} + OIDAttributeMessageDigest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 4} + OIDAttributeSigningTime = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 5} + + // Digest Algorithms + OIDDigestAlgorithmSHA1 = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26} + OIDDigestAlgorithmSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} + OIDDigestAlgorithmSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} + OIDDigestAlgorithmSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} + + OIDDigestAlgorithmDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} + OIDDigestAlgorithmDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + + OIDDigestAlgorithmECDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + OIDDigestAlgorithmECDSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + OIDDigestAlgorithmECDSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + OIDDigestAlgorithmECDSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} + + // Signature Algorithms + OIDEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + OIDEncryptionAlgorithmRSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + OIDEncryptionAlgorithmRSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + OIDEncryptionAlgorithmRSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + OIDEncryptionAlgorithmRSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + + OIDEncryptionAlgorithmECDSAP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} + OIDEncryptionAlgorithmECDSAP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} + OIDEncryptionAlgorithmECDSAP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} + + OIDEncryptionAlgorithmEDDSA25519 = asn1.ObjectIdentifier{1, 3, 101, 112} + + // Encryption Algorithms + OIDEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7} + OIDEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} + OIDEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} + OIDEncryptionAlgorithmAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6} + OIDEncryptionAlgorithmAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} + OIDEncryptionAlgorithmAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46} +) + +func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) { + switch { + case oid.Equal(OIDDigestAlgorithmSHA1), oid.Equal(OIDDigestAlgorithmECDSASHA1), + oid.Equal(OIDDigestAlgorithmDSA), oid.Equal(OIDDigestAlgorithmDSASHA1), + oid.Equal(OIDEncryptionAlgorithmRSA): + return crypto.SHA1, nil + case oid.Equal(OIDDigestAlgorithmSHA256), oid.Equal(OIDDigestAlgorithmECDSASHA256): + return crypto.SHA256, nil + case oid.Equal(OIDDigestAlgorithmSHA384), oid.Equal(OIDDigestAlgorithmECDSASHA384): + return crypto.SHA384, nil + case oid.Equal(OIDDigestAlgorithmSHA512), 
oid.Equal(OIDDigestAlgorithmECDSASHA512): + return crypto.SHA512, nil + } + return crypto.Hash(0), ErrUnsupportedAlgorithm +} + +// GetDigestOIDForSignatureAlgorithm takes an x509.SignatureAlgorithm +// and returns the corresponding OID digest algorithm +func GetDigestOIDForSignatureAlgorithm(digestAlg x509.SignatureAlgorithm) (asn1.ObjectIdentifier, error) { + switch digestAlg { + case x509.SHA1WithRSA, x509.ECDSAWithSHA1: + return OIDDigestAlgorithmSHA1, nil + case x509.SHA256WithRSA, x509.ECDSAWithSHA256: + return OIDDigestAlgorithmSHA256, nil + case x509.SHA384WithRSA, x509.ECDSAWithSHA384: + return OIDDigestAlgorithmSHA384, nil + case x509.SHA512WithRSA, x509.ECDSAWithSHA512, x509.PureEd25519: + return OIDDigestAlgorithmSHA512, nil + } + return nil, fmt.Errorf("pkcs7: cannot convert hash to oid, unknown hash algorithm") +} + +// getOIDForEncryptionAlgorithm takes a private key or signer and +// the OID of a digest algorithm to return the appropriate signerInfo.DigestEncryptionAlgorithm +func getOIDForEncryptionAlgorithm(keyOrSigner interface{}, OIDDigestAlg asn1.ObjectIdentifier) (asn1.ObjectIdentifier, error) { + _, ok := keyOrSigner.(*dsa.PrivateKey) + if ok { + return OIDDigestAlgorithmDSA, nil + } + + signer, ok := keyOrSigner.(crypto.Signer) + if !ok { + return nil, errors.New("pkcs7: key does not implement crypto.Signer") + } + switch signer.Public().(type) { + case *rsa.PublicKey: + switch { + default: + return OIDEncryptionAlgorithmRSA, nil + case OIDDigestAlg.Equal(OIDEncryptionAlgorithmRSA): + return OIDEncryptionAlgorithmRSA, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1): + return OIDEncryptionAlgorithmRSASHA1, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256): + return OIDEncryptionAlgorithmRSASHA256, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384): + return OIDEncryptionAlgorithmRSASHA384, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512): + return OIDEncryptionAlgorithmRSASHA512, nil + } + case *ecdsa.PublicKey: + switch { + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1): + return OIDDigestAlgorithmECDSASHA1, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256): + return OIDDigestAlgorithmECDSASHA256, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384): + return OIDDigestAlgorithmECDSASHA384, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512): + return OIDDigestAlgorithmECDSASHA512, nil + } + case ed25519.PublicKey: + return OIDEncryptionAlgorithmEDDSA25519, nil + } + return nil, fmt.Errorf("pkcs7: cannot convert encryption algorithm to oid, unknown key type %T", signer.Public()) +} + +// Parse decodes a DER encoded PKCS7 package +func Parse(data []byte) (p7 *PKCS7, err error) { + if len(data) == 0 { + return nil, errors.New("pkcs7: input data is empty") + } + var info contentInfo + der, err := ber2der(data) + if err != nil { + return nil, err + } + rest, err := asn1.Unmarshal(der, &info) + if len(rest) > 0 { + err = asn1.SyntaxError{Msg: "trailing data"} + return + } + if err != nil { + return + } + + // fmt.Printf("--> Content Type: %s", info.ContentType) + switch { + case info.ContentType.Equal(OIDSignedData): + return parseSignedData(info.Content.Bytes) + case info.ContentType.Equal(OIDEnvelopedData): + return parseEnvelopedData(info.Content.Bytes) + case info.ContentType.Equal(OIDEncryptedData): + return parseEncryptedData(info.Content.Bytes) + } + return nil, ErrUnsupportedContentType +} + +func parseEnvelopedData(data []byte) (*PKCS7, error) { + var ed envelopedData + if _, err := asn1.Unmarshal(data, &ed); err != nil { + 
+		return nil, err
+	}
+	return &PKCS7{
+		raw: ed,
+	}, nil
+}
+
+func parseEncryptedData(data []byte) (*PKCS7, error) {
+	var ed encryptedData
+	if _, err := asn1.Unmarshal(data, &ed); err != nil {
+		return nil, err
+	}
+	return &PKCS7{
+		raw: ed,
+	}, nil
+}
+
+func (raw rawCertificates) Parse() ([]*x509.Certificate, error) {
+	if len(raw.Raw) == 0 {
+		return nil, nil
+	}
+
+	var val asn1.RawValue
+	if _, err := asn1.Unmarshal(raw.Raw, &val); err != nil {
+		return nil, err
+	}
+
+	return x509.ParseCertificates(val.Bytes)
+}
+
+func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool {
+	return cert.SerialNumber.Cmp(ias.SerialNumber) == 0 && bytes.Equal(cert.RawIssuer, ias.IssuerName.FullBytes)
+}
+
+// Attribute represents a key value pair attribute. Value must be marshalable by
+// `encoding/asn1`
+type Attribute struct {
+	Type  asn1.ObjectIdentifier
+	Value interface{}
+}
+
+type attributes struct {
+	types  []asn1.ObjectIdentifier
+	values []interface{}
+}
+
+// Add adds the attribute, maintaining insertion order
+func (attrs *attributes) Add(attrType asn1.ObjectIdentifier, value interface{}) {
+	attrs.types = append(attrs.types, attrType)
+	attrs.values = append(attrs.values, value)
+}
+
+type sortableAttribute struct {
+	SortKey   []byte
+	Attribute attribute
+}
+
+type attributeSet []sortableAttribute
+
+func (sa attributeSet) Len() int {
+	return len(sa)
+}
+
+func (sa attributeSet) Less(i, j int) bool {
+	return bytes.Compare(sa[i].SortKey, sa[j].SortKey) < 0
+}
+
+func (sa attributeSet) Swap(i, j int) {
+	sa[i], sa[j] = sa[j], sa[i]
+}
+
+func (sa attributeSet) Attributes() []attribute {
+	attrs := make([]attribute, len(sa))
+	for i, attr := range sa {
+		attrs[i] = attr.Attribute
+	}
+	return attrs
+}
+
+func (attrs *attributes) ForMarshalling() ([]attribute, error) {
+	sortables := make(attributeSet, len(attrs.types))
+	for i := range sortables {
+		attrType := attrs.types[i]
+		attrValue := attrs.values[i]
+		asn1Value, err := asn1.Marshal(attrValue)
+		if err != nil {
+			return nil, err
+		}
+		attr := attribute{
+			Type:  attrType,
+			Value: asn1.RawValue{Tag: 17, IsCompound: true, Bytes: asn1Value}, // 17 == SET tag
+		}
+		encoded, err := asn1.Marshal(attr)
+		if err != nil {
+			return nil, err
+		}
+		sortables[i] = sortableAttribute{
+			SortKey:   encoded,
+			Attribute: attr,
+		}
+	}
+	sort.Sort(sortables)
+	return sortables.Attributes(), nil
+}
diff --git a/vendor/github.com/digitorus/pkcs7/sign.go b/vendor/github.com/digitorus/pkcs7/sign.go
new file mode 100644
index 00000000000..6cfd2ab9c26
--- /dev/null
+++ b/vendor/github.com/digitorus/pkcs7/sign.go
@@ -0,0 +1,456 @@
+package pkcs7
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/dsa"
+	"crypto/ed25519"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"math/big"
+	"time"
+)
+
+// SignedData is an opaque data structure for creating signed data payloads
+type SignedData struct {
+	sd                  signedData
+	certs               []*x509.Certificate
+	data, messageDigest []byte
+	digestOid           asn1.ObjectIdentifier
+	encryptionOid       asn1.ObjectIdentifier
+}
+
+// NewSignedData takes data and initializes a PKCS7 SignedData struct that is
+// ready to be signed via AddSigner. The digest algorithm is set to SHA1 by default
+// and can be changed by calling SetDigestAlgorithm.
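+//
+// A minimal end-to-end sketch (cert and privateKey are hypothetical,
+// pre-loaded variables; error handling elided for brevity):
+//
+//	sd, _ := pkcs7.NewSignedData([]byte("content to sign"))
+//	_ = sd.AddSigner(cert, privateKey, pkcs7.SignerInfoConfig{})
+//	der, _ := sd.Finish()
+//	p7, _ := pkcs7.Parse(der)
+//	_ = p7.Verify()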
+func NewSignedData(data []byte) (*SignedData, error) {
+	content, err := asn1.Marshal(data)
+	if err != nil {
+		return nil, err
+	}
+	ci := contentInfo{
+		ContentType: OIDData,
+		Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
+	}
+	sd := signedData{
+		ContentInfo: ci,
+		Version:     1,
+	}
+	return &SignedData{sd: sd, data: data, digestOid: OIDDigestAlgorithmSHA1}, nil
+}
+
+// SignerInfoConfig are optional values to include when adding a signer
+type SignerInfoConfig struct {
+	ExtraSignedAttributes   []Attribute
+	ExtraUnsignedAttributes []Attribute
+	SkipCertificates        bool
+}
+
+type signedData struct {
+	Version                    int                        `asn1:"default:1"`
+	DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"`
+	ContentInfo                contentInfo
+	Certificates               rawCertificates        `asn1:"optional,tag:0"`
+	CRLs                       []pkix.CertificateList `asn1:"optional,tag:1"`
+	SignerInfos                []signerInfo           `asn1:"set"`
+}
+
+type signerInfo struct {
+	Version                   int `asn1:"default:1"`
+	IssuerAndSerialNumber     issuerAndSerial
+	DigestAlgorithm           pkix.AlgorithmIdentifier
+	AuthenticatedAttributes   []attribute `asn1:"optional,omitempty,tag:0"`
+	DigestEncryptionAlgorithm pkix.AlgorithmIdentifier
+	EncryptedDigest           []byte
+	UnauthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:1"`
+}
+
+type attribute struct {
+	Type  asn1.ObjectIdentifier
+	Value asn1.RawValue `asn1:"set"`
+}
+
+func marshalAttributes(attrs []attribute) ([]byte, error) {
+	encodedAttributes, err := asn1.Marshal(struct {
+		A []attribute `asn1:"set"`
+	}{A: attrs})
+	if err != nil {
+		return nil, err
+	}
+
+	// Remove the leading sequence octets
+	var raw asn1.RawValue
+	if _, err := asn1.Unmarshal(encodedAttributes, &raw); err != nil {
+		return nil, err
+	}
+	return raw.Bytes, nil
+}
+
+type rawCertificates struct {
+	Raw asn1.RawContent
+}
+
+type issuerAndSerial struct {
+	IssuerName   asn1.RawValue
+	SerialNumber *big.Int
+}
+
+// SetDigestAlgorithm sets the digest algorithm to be used in the signing process.
+//
+// This should be called before adding signers
+func (sd *SignedData) SetDigestAlgorithm(d asn1.ObjectIdentifier) {
+	sd.digestOid = d
+}
+
+// SetEncryptionAlgorithm sets the encryption algorithm to be used in the signing process.
+//
+// This should be called before adding signers
+func (sd *SignedData) SetEncryptionAlgorithm(d asn1.ObjectIdentifier) {
+	sd.encryptionOid = d
+}
+
+// AddSigner is a wrapper around AddSignerChain() that adds a signer without any parent. The signer can
+// either be a crypto.Signer or crypto.PrivateKey.
+func (sd *SignedData) AddSigner(ee *x509.Certificate, keyOrSigner interface{}, config SignerInfoConfig) error {
+	var parents []*x509.Certificate
+	return sd.AddSignerChain(ee, keyOrSigner, parents, config)
+}
+
+// AddSignerChain signs attributes about the content and adds certificates
+// and signer infos to the Signed Data. The certificate and private key
+// of the end-entity signer are used to issue the signature, and any
+// parents of that end-entity that need to be added to the list of
+// certificates can be specified in the parents slice.
+//
+// The signature algorithm used to hash the data is that of the end-entity
+// certificate. The signer can be either a crypto.Signer or crypto.PrivateKey.
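+//
+// A hedged usage sketch (payload, ee, key and intermediate are assumed to be
+// loaded elsewhere):
+//
+//	sd, _ := pkcs7.NewSignedData(payload)
+//	sd.SetDigestAlgorithm(pkcs7.OIDDigestAlgorithmSHA256)
+//	err := sd.AddSignerChain(ee, key, []*x509.Certificate{intermediate}, pkcs7.SignerInfoConfig{})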
+func (sd *SignedData) AddSignerChain(ee *x509.Certificate, keyOrSigner interface{}, parents []*x509.Certificate, config SignerInfoConfig) error {
+	// Following RFC 2315, 9.2 SignerInfo type, the distinguished name of
+	// the issuer of the end-entity signer is stored in the issuerAndSerialNumber
+	// section of the SignedData.SignerInfo, alongside the serial number of
+	// the end-entity.
+	var ias issuerAndSerial
+	ias.SerialNumber = ee.SerialNumber
+	if len(parents) == 0 {
+		// no parent, the issuer is the end-entity cert itself
+		ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer}
+	} else {
+		err := verifyPartialChain(ee, parents)
+		if err != nil {
+			return err
+		}
+		// the first parent is the issuer
+		ias.IssuerName = asn1.RawValue{FullBytes: parents[0].RawSubject}
+	}
+	sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers,
+		pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
+	)
+	hash, err := getHashForOID(sd.digestOid)
+	if err != nil {
+		return err
+	}
+	h := hash.New()
+	h.Write(sd.data)
+	sd.messageDigest = h.Sum(nil)
+	encryptionOid, err := getOIDForEncryptionAlgorithm(keyOrSigner, sd.digestOid)
+	if err != nil {
+		return err
+	}
+	attrs := &attributes{}
+	attrs.Add(OIDAttributeContentType, sd.sd.ContentInfo.ContentType)
+	attrs.Add(OIDAttributeMessageDigest, sd.messageDigest)
+	attrs.Add(OIDAttributeSigningTime, time.Now().UTC())
+	for _, attr := range config.ExtraSignedAttributes {
+		attrs.Add(attr.Type, attr.Value)
+	}
+	finalAttrs, err := attrs.ForMarshalling()
+	if err != nil {
+		return err
+	}
+	unsignedAttrs := &attributes{}
+	for _, attr := range config.ExtraUnsignedAttributes {
+		unsignedAttrs.Add(attr.Type, attr.Value)
+	}
+	finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling()
+	if err != nil {
+		return err
+	}
+	// create signature of signed attributes
+	signature, err := signAttributes(finalAttrs, keyOrSigner, hash)
+	if err != nil {
+		return err
+	}
+	signerInfo := signerInfo{
+		AuthenticatedAttributes:   finalAttrs,
+		UnauthenticatedAttributes: finalUnsignedAttrs,
+		DigestAlgorithm:           pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
+		DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: encryptionOid},
+		IssuerAndSerialNumber:     ias,
+		EncryptedDigest:           signature,
+		Version:                   1,
+	}
+	if !config.SkipCertificates {
+		sd.certs = append(sd.certs, ee)
+		if len(parents) > 0 {
+			sd.certs = append(sd.certs, parents...)
+		}
+	}
+	sd.sd.SignerInfos = append(sd.sd.SignerInfos, signerInfo)
+	return nil
+}
+
+// SignWithoutAttr issues a signature on the content of the pkcs7 SignedData.
+// Unlike AddSigner/AddSignerChain, it calculates the digest on the data alone
+// and does not include any signed attributes like timestamp and so on.
+//
+// This function is needed to sign old Android APKs, something you probably
+// shouldn't do unless you're maintaining backward compatibility for old
+// applications. The signer can be either a crypto.Signer or crypto.PrivateKey.
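+//
+// Sketch (cert and key assumed; note the signature covers the content only):
+//
+//	sd, _ := pkcs7.NewSignedData(content)
+//	if err := sd.SignWithoutAttr(cert, key, pkcs7.SignerInfoConfig{}); err != nil {
+//		// handle error
+//	}
+//	der, _ := sd.Finish()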
+func (sd *SignedData) SignWithoutAttr(ee *x509.Certificate, keyOrSigner interface{}, config SignerInfoConfig) error {
+	var signature []byte
+	sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, pkix.AlgorithmIdentifier{Algorithm: sd.digestOid})
+	hash, err := getHashForOID(sd.digestOid)
+	if err != nil {
+		return err
+	}
+	h := hash.New()
+	h.Write(sd.data)
+	sd.messageDigest = h.Sum(nil)
+
+	switch pkey := keyOrSigner.(type) {
+	case *dsa.PrivateKey:
+		// dsa doesn't implement crypto.Signer so we make a special case
+		// https://github.com/golang/go/issues/27889
+		r, s, err := dsa.Sign(rand.Reader, pkey, sd.messageDigest)
+		if err != nil {
+			return err
+		}
+		signature, err = asn1.Marshal(dsaSignature{r, s})
+		if err != nil {
+			return err
+		}
+	default:
+		signer, ok := keyOrSigner.(crypto.Signer)
+		if !ok {
+			return errors.New("pkcs7: private key does not implement crypto.Signer")
+		}
+
+		// special case for Ed25519, which hashes as part of the signing algorithm
+		_, ok = signer.Public().(ed25519.PublicKey)
+		if ok {
+			signature, err = signer.Sign(rand.Reader, sd.data, crypto.Hash(0))
+		} else {
+			signature, err = signer.Sign(rand.Reader, sd.messageDigest, hash)
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	var ias issuerAndSerial
+	ias.SerialNumber = ee.SerialNumber
+	// no parent, the issuer is the end-entity cert itself
+	ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer}
+	if sd.encryptionOid == nil {
+		// if the encryption algorithm wasn't set by SetEncryptionAlgorithm,
+		// infer it from the digest algorithm
+		sd.encryptionOid, err = getOIDForEncryptionAlgorithm(keyOrSigner, sd.digestOid)
+		if err != nil {
+			return err
+		}
+	}
+	signerInfo := signerInfo{
+		DigestAlgorithm:           pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
+		DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.encryptionOid},
+		IssuerAndSerialNumber:     ias,
+		EncryptedDigest:           signature,
+		Version:                   1,
+	}
+	sd.certs = append(sd.certs, ee)
+	sd.sd.SignerInfos = append(sd.sd.SignerInfos, signerInfo)
+	return nil
+}
+
+func (si *signerInfo) SetUnauthenticatedAttributes(extraUnsignedAttrs []Attribute) error {
+	unsignedAttrs := &attributes{}
+	for _, attr := range extraUnsignedAttrs {
+		unsignedAttrs.Add(attr.Type, attr.Value)
+	}
+	finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling()
+	if err != nil {
+		return err
+	}
+
+	si.UnauthenticatedAttributes = finalUnsignedAttrs
+
+	return nil
+}
+
+// AddCertificate adds the certificate to the payload. Useful for parent certificates
+func (sd *SignedData) AddCertificate(cert *x509.Certificate) {
+	sd.certs = append(sd.certs, cert)
+}
+
+// SetContentType sets the content type of the SignedData. For example to specify the
+// content type of a time-stamp token according to RFC 3161 section 2.4.2.
+func (sd *SignedData) SetContentType(contentType asn1.ObjectIdentifier) {
+	sd.sd.ContentInfo.ContentType = contentType
+}
+
+// Detach removes content from the signed data struct to make it a detached signature.
+// This must be called right before Finish()
+func (sd *SignedData) Detach() {
+	sd.sd.ContentInfo = contentInfo{ContentType: OIDData}
+}
+
+// GetSignedData returns the private Signed Data
+func (sd *SignedData) GetSignedData() *signedData {
+	return &sd.sd
+}
+
+// Finish marshals the content and its signers
+func (sd *SignedData) Finish() ([]byte, error) {
+	sd.sd.Certificates = marshalCertificates(sd.certs)
+	inner, err := asn1.Marshal(sd.sd)
+	if err != nil {
+		return nil, err
+	}
+	outer := contentInfo{
+		ContentType: OIDSignedData,
+		Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true},
+	}
+	return asn1.Marshal(outer)
+}
+
+// RemoveAuthenticatedAttributes removes authenticated attributes from signedData
+// similar to OpenSSL's PKCS7_NOATTR or -noattr flags
+func (sd *SignedData) RemoveAuthenticatedAttributes() {
+	for i := range sd.sd.SignerInfos {
+		sd.sd.SignerInfos[i].AuthenticatedAttributes = nil
+	}
+}
+
+// RemoveUnauthenticatedAttributes removes unauthenticated attributes from signedData
+func (sd *SignedData) RemoveUnauthenticatedAttributes() {
+	for i := range sd.sd.SignerInfos {
+		sd.sd.SignerInfos[i].UnauthenticatedAttributes = nil
+	}
+}
+
+// verifyPartialChain checks that a given cert is issued by the first parent in the list,
+// then continues down the path. It doesn't require the last parent to be a root CA,
+// or to be trusted in any truststore. It simply verifies that the chain provided, albeit
+// partial, makes sense.
+func verifyPartialChain(cert *x509.Certificate, parents []*x509.Certificate) error {
+	if len(parents) == 0 {
+		return fmt.Errorf("pkcs7: zero parents provided to verify the signature of certificate %q", cert.Subject.CommonName)
+	}
+	err := cert.CheckSignatureFrom(parents[0])
+	if err != nil {
+		return fmt.Errorf("pkcs7: certificate signature from parent is invalid: %v", err)
+	}
+	if len(parents) == 1 {
+		// there is no more parent to check, return
+		return nil
+	}
+	return verifyPartialChain(parents[0], parents[1:])
+}
+
+func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) {
+	var ias issuerAndSerial
+	// The issuer RDNSequence has to match exactly the sequence in the certificate
+	// We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence
+	ias.IssuerName = asn1.RawValue{FullBytes: cert.RawIssuer}
+	ias.SerialNumber = cert.SerialNumber
+
+	return ias, nil
+}
+
+// signs the DER encoded form of the attributes with the private key
+func signAttributes(attrs []attribute, keyOrSigner interface{}, digestAlg crypto.Hash) ([]byte, error) {
+	attrBytes, err := marshalAttributes(attrs)
+	if err != nil {
+		return nil, err
+	}
+	h := digestAlg.New()
+	h.Write(attrBytes)
+	hash := h.Sum(nil)
+
+	// dsa doesn't implement crypto.Signer so we make a special case
+	// https://github.com/golang/go/issues/27889
+	switch pkey := keyOrSigner.(type) {
+	case *dsa.PrivateKey:
+		r, s, err := dsa.Sign(rand.Reader, pkey, hash)
+		if err != nil {
+			return nil, err
+		}
+		return asn1.Marshal(dsaSignature{r, s})
+	}
+
+	signer, ok := keyOrSigner.(crypto.Signer)
+	if !ok {
+		return nil, errors.New("pkcs7: private key does not implement crypto.Signer")
+	}
+
+	// special case for Ed25519, which hashes as part of the signing algorithm
+	_, ok = signer.Public().(ed25519.PublicKey)
+	if ok {
+		return signer.Sign(rand.Reader, attrBytes, crypto.Hash(0))
+	}
+
+	return signer.Sign(rand.Reader, hash, digestAlg)
+}
+
+type dsaSignature struct {
+	R, S *big.Int
+}
+
+// concatenates and wraps the certificates in the RawValue structure
+func marshalCertificates(certs []*x509.Certificate) rawCertificates {
+	var buf bytes.Buffer
+	for _, cert := range certs {
+		buf.Write(cert.Raw)
+	}
+	rawCerts, _ := marshalCertificateBytes(buf.Bytes())
+	return rawCerts
+}
+
+// Even though the tag & length are stripped out during marshalling the
+// RawContent, we have to encode it into the RawContent. If it's missing,
+// then `asn1.Marshal()` will strip out the certificate wrapper instead.
+func marshalCertificateBytes(certs []byte) (rawCertificates, error) {
+	var val = asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true}
+	b, err := asn1.Marshal(val)
+	if err != nil {
+		return rawCertificates{}, err
+	}
+	return rawCertificates{Raw: b}, nil
+}
+
+// DegenerateCertificate creates a signed data structure containing only the
+// provided certificate or certificate chain.
+func DegenerateCertificate(cert []byte) ([]byte, error) {
+	rawCert, err := marshalCertificateBytes(cert)
+	if err != nil {
+		return nil, err
+	}
+	emptyContent := contentInfo{ContentType: OIDData}
+	sd := signedData{
+		Version:      1,
+		ContentInfo:  emptyContent,
+		Certificates: rawCert,
+		CRLs:         []pkix.CertificateList{},
+	}
+	content, err := asn1.Marshal(sd)
+	if err != nil {
+		return nil, err
+	}
+	signedContent := contentInfo{
+		ContentType: OIDSignedData,
+		Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
+	}
+	return asn1.Marshal(signedContent)
+}
diff --git a/vendor/github.com/digitorus/pkcs7/verify.go b/vendor/github.com/digitorus/pkcs7/verify.go
new file mode 100644
index 00000000000..d0e4f0429d6
--- /dev/null
+++ b/vendor/github.com/digitorus/pkcs7/verify.go
@@ -0,0 +1,370 @@
+package pkcs7
+
+import (
+	"crypto/subtle"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"time"
+)
+
+// Verify is a wrapper around VerifyWithChain() that initializes an empty
+// trust store, effectively disabling certificate verification when validating
+// a signature.
+func (p7 *PKCS7) Verify() (err error) {
+	return p7.VerifyWithChain(nil)
+}
+
+// VerifyWithChain checks the signatures of a PKCS7 object.
+//
+// If truststore is not nil, it also verifies the chain of trust of
+// the end-entity signer cert to one of the roots in the
+// truststore. When the PKCS7 object includes the signing time
+// authenticated attr it verifies the chain at that time and UTC now
+// otherwise.
+func (p7 *PKCS7) VerifyWithChain(truststore *x509.CertPool) (err error) {
+	intermediates := x509.NewCertPool()
+	for _, cert := range p7.Certificates {
+		intermediates.AddCert(cert)
+	}
+
+	opts := x509.VerifyOptions{
+		Roots:         truststore,
+		Intermediates: intermediates,
+	}
+
+	return p7.VerifyWithOpts(opts)
+}
+
+// VerifyWithChainAtTime checks the signatures of a PKCS7 object.
+//
+// If truststore is not nil, it also verifies the chain of trust of
+// the end-entity signer cert to a root in the truststore at
+// currentTime. It does not use the signing time authenticated
+// attribute.
+func (p7 *PKCS7) VerifyWithChainAtTime(truststore *x509.CertPool, currentTime time.Time) (err error) {
+	intermediates := x509.NewCertPool()
+	for _, cert := range p7.Certificates {
+		intermediates.AddCert(cert)
+	}
+
+	opts := x509.VerifyOptions{
+		Roots:         truststore,
+		Intermediates: intermediates,
+		CurrentTime:   currentTime,
+	}
+
+	return p7.VerifyWithOpts(opts)
+}
+
+// VerifyWithOpts checks the signatures of a PKCS7 object.
+//
+// It accepts x509.VerifyOptions as a parameter.
+// This struct contains a root certificate pool, an intermediate certificate pool,
+// an optional list of EKUs, and an optional time that certificates should be
+// checked as being valid during.
+//
+// If VerifyOpts.Roots is not nil it verifies the chain of trust of
+// the end-entity signer cert to one of the roots in the
+// truststore. When the PKCS7 object includes the signing time
+// authenticated attr it verifies the chain at that time and UTC now
+// otherwise.
+func (p7 *PKCS7) VerifyWithOpts(opts x509.VerifyOptions) (err error) {
+	// if KeyUsage isn't set, default to ExtKeyUsageAny
+	if opts.KeyUsages == nil {
+		opts.KeyUsages = []x509.ExtKeyUsage{x509.ExtKeyUsageAny}
+	}
+
+	if len(p7.Signers) == 0 {
+		return errors.New("pkcs7: Message has no signers")
+	}
+
+	// if opts.CurrentTime is not set, call verifySignature,
+	// which will verify the leaf certificate with the current time
+	if opts.CurrentTime.IsZero() {
+		for _, signer := range p7.Signers {
+			if err := verifySignature(p7, signer, opts); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+	// if opts.CurrentTime is set, call verifySignatureAtTime,
+	// which will verify the leaf certificate with opts.CurrentTime
+	for _, signer := range p7.Signers {
+		if err := verifySignatureAtTime(p7, signer, opts); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func verifySignatureAtTime(p7 *PKCS7, signer signerInfo, opts x509.VerifyOptions) (err error) {
+	signedData := p7.Content
+	ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
+	if ee == nil {
+		return errors.New("pkcs7: No certificate for signer")
+	}
+	if len(signer.AuthenticatedAttributes) > 0 {
+		// TODO(fullsailor): First check the content type match
+		var (
+			digest      []byte
+			signingTime time.Time
+		)
+		err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest)
+		if err != nil {
+			return err
+		}
+		hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm)
+		if err != nil {
+			return err
+		}
+		h := hash.New()
+		h.Write(p7.Content)
+		computed := h.Sum(nil)
+		if subtle.ConstantTimeCompare(digest, computed) != 1 {
+			return &MessageDigestMismatchError{
+				ExpectedDigest: digest,
+				ActualDigest:   computed,
+			}
+		}
+		signedData, err = marshalAttributes(signer.AuthenticatedAttributes)
+		if err != nil {
+			return err
+		}
+		err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime)
+		if err == nil {
+			// signing time found, performing validity check
+			if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) {
+				return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q",
+					signingTime.Format(time.RFC3339),
+					ee.NotBefore.Format(time.RFC3339),
+					ee.NotAfter.Format(time.RFC3339))
+			}
+		}
+	}
+	if opts.Roots != nil {
+		_, err = ee.Verify(opts)
+		if err != nil {
+			return fmt.Errorf("pkcs7: failed to verify certificate chain: %v", err)
+		}
+	}
+	sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm)
+	if err != nil {
+		return err
+	}
+	return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest)
+}
+
+func verifySignature(p7 *PKCS7, signer signerInfo, opts x509.VerifyOptions) (err error) {
+	signedData := p7.Content
+	ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
+	if ee == nil {
+		return errors.New("pkcs7: No certificate for signer")
+	}
+	signingTime := time.Now().UTC()
+	if len(signer.AuthenticatedAttributes) > 0 {
+		// TODO(fullsailor): First check the content type match
+		var digest []byte
+		err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest)
+		if err != nil {
+			return err
+		}
+		hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm)
+		if err != nil {
+			return err
+		}
+		h := hash.New()
+		h.Write(p7.Content)
+		computed := h.Sum(nil)
+		if subtle.ConstantTimeCompare(digest, computed) != 1 {
+			return &MessageDigestMismatchError{
+				ExpectedDigest: digest,
+				ActualDigest:   computed,
+			}
+		}
+		signedData, err = marshalAttributes(signer.AuthenticatedAttributes)
+		if err != nil {
+			return err
+		}
+		err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime)
+		if err == nil {
+			// signing time found, performing validity check
+			if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) {
+				return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q",
+					signingTime.Format(time.RFC3339),
+					ee.NotBefore.Format(time.RFC3339),
+					ee.NotAfter.Format(time.RFC3339))
+			}
+		}
+	}
+	if opts.Roots != nil {
+		opts.CurrentTime = signingTime
+		_, err = ee.Verify(opts)
+		if err != nil {
+			return fmt.Errorf("pkcs7: failed to verify certificate chain: %v", err)
+		}
+	}
+	sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm)
+	if err != nil {
+		return err
+	}
+	return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest)
+}
+
+// GetOnlySigner returns an x509.Certificate for the first signer of the signed
+// data payload. If there is not exactly one signer, nil is returned.
+func (p7 *PKCS7) GetOnlySigner() *x509.Certificate {
+	if len(p7.Signers) != 1 {
+		return nil
+	}
+	signer := p7.Signers[0]
+	return getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
+}
+
+// UnmarshalSignedAttribute decodes a single attribute from the signer info
+func (p7 *PKCS7) UnmarshalSignedAttribute(attributeType asn1.ObjectIdentifier, out interface{}) error {
+	sd, ok := p7.raw.(signedData)
+	if !ok {
+		return errors.New("pkcs7: payload is not signedData content")
+	}
+	if len(sd.SignerInfos) < 1 {
+		return errors.New("pkcs7: payload has no signers")
+	}
+	attributes := sd.SignerInfos[0].AuthenticatedAttributes
+	return unmarshalAttribute(attributes, attributeType, out)
+}
+
+func parseSignedData(data []byte) (*PKCS7, error) {
+	var sd signedData
+	if _, err := asn1.Unmarshal(data, &sd); err != nil {
+		return nil, err
+	}
+	certs, err := sd.Certificates.Parse()
+	if err != nil {
+		return nil, err
+	}
+	// fmt.Printf("--> Signed Data Version %d\n", sd.Version)
+
+	var compound asn1.RawValue
+	var content unsignedData
+
+	// The Content.Bytes may be empty on PKI responses.
+ if len(sd.ContentInfo.Content.Bytes) > 0 { + if _, err := asn1.Unmarshal(sd.ContentInfo.Content.Bytes, &compound); err != nil { + return nil, err + } + } + // Compound octet string + if compound.IsCompound { + if compound.Tag == 4 { + if _, err = asn1.Unmarshal(compound.Bytes, &content); err != nil { + return nil, err + } + } else { + content = compound.Bytes + } + } else { + // assuming this is tag 04 + content = compound.Bytes + } + return &PKCS7{ + Content: content, + Certificates: certs, + CRLs: sd.CRLs, + Signers: sd.SignerInfos, + raw: sd}, nil +} + +// MessageDigestMismatchError is returned when the signer data digest does not +// match the computed digest for the contained content +type MessageDigestMismatchError struct { + ExpectedDigest []byte + ActualDigest []byte +} + +func (err *MessageDigestMismatchError) Error() string { + return fmt.Sprintf("pkcs7: Message digest mismatch\n\tExpected: %X\n\tActual : %X", err.ExpectedDigest, err.ActualDigest) +} + +func getSignatureAlgorithm(digestEncryption, digest pkix.AlgorithmIdentifier) (x509.SignatureAlgorithm, error) { + switch { + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA1): + return x509.ECDSAWithSHA1, nil + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA256): + return x509.ECDSAWithSHA256, nil + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA384): + return x509.ECDSAWithSHA384, nil + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA512): + return x509.ECDSAWithSHA512, nil + case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSA), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA1), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA256), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA384), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA512): + switch { + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return x509.SHA1WithRSA, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return x509.SHA256WithRSA, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384): + return x509.SHA384WithRSA, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512): + return x509.SHA512WithRSA, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", + digest.Algorithm.String(), digestEncryption.Algorithm.String()) + } + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSA), + digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSASHA1): + switch { + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return x509.DSAWithSHA1, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return x509.DSAWithSHA256, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", + digest.Algorithm.String(), digestEncryption.Algorithm.String()) + } + case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP256), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP384), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP521): + switch { + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return x509.ECDSAWithSHA1, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return x509.ECDSAWithSHA256, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384): + return x509.ECDSAWithSHA384, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512): + return x509.ECDSAWithSHA512, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption 
algorithm %q", + digest.Algorithm.String(), digestEncryption.Algorithm.String()) + } + case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmEDDSA25519): + return x509.PureEd25519, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported algorithm %q", + digestEncryption.Algorithm.String()) + } +} + +func getCertFromCertsByIssuerAndSerial(certs []*x509.Certificate, ias issuerAndSerial) *x509.Certificate { + for _, cert := range certs { + if isCertMatchForIssuerAndSerial(cert, ias) { + return cert + } + } + return nil +} + +func unmarshalAttribute(attrs []attribute, attributeType asn1.ObjectIdentifier, out interface{}) error { + for _, attr := range attrs { + if attr.Type.Equal(attributeType) { + _, err := asn1.Unmarshal(attr.Value.Bytes, out) + return err + } + } + return errors.New("pkcs7: attribute type not in attributes") +} diff --git a/vendor/github.com/digitorus/pkcs7/verify_test_dsa.go b/vendor/github.com/digitorus/pkcs7/verify_test_dsa.go new file mode 100644 index 00000000000..1eb05bc3eae --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/verify_test_dsa.go @@ -0,0 +1,182 @@ +// +build go1.11 go1.12 go1.13 go1.14 go1.15 + +package pkcs7 + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "os/exec" + "testing" +) + +func TestVerifyEC2(t *testing.T) { + fixture := UnmarshalDSATestFixture(EC2IdentityDocumentFixture) + p7, err := Parse(fixture.Input) + if err != nil { + t.Errorf("Parse encountered unexpected error: %v", err) + } + p7.Certificates = []*x509.Certificate{fixture.Certificate} + if err := p7.Verify(); err != nil { + t.Errorf("Verify failed with error: %v", err) + } +} + +var EC2IdentityDocumentFixture = ` +-----BEGIN PKCS7----- +MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCA +JIAEggGmewogICJwcml2YXRlSXAiIDogIjE3Mi4zMC4wLjI1MiIsCiAgImRldnBh +eVByb2R1Y3RDb2RlcyIgOiBudWxsLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1 +cy1lYXN0LTFhIiwKICAidmVyc2lvbiIgOiAiMjAxMC0wOC0zMSIsCiAgImluc3Rh +bmNlSWQiIDogImktZjc5ZmU1NmMiLAogICJiaWxsaW5nUHJvZHVjdHMiIDogbnVs +bCwKICAiaW5zdGFuY2VUeXBlIiA6ICJ0Mi5taWNybyIsCiAgImFjY291bnRJZCIg +OiAiMTIxNjU5MDE0MzM0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLWZjZTNjNjk2IiwK +ICAicGVuZGluZ1RpbWUiIDogIjIwMTYtMDQtMDhUMDM6MDE6MzhaIiwKICAiYXJj +aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJy +YW1kaXNrSWQiIDogbnVsbCwKICAicmVnaW9uIiA6ICJ1cy1lYXN0LTEiCn0AAAAA +AAAxggEYMIIBFAIBATBpMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5n +dG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2Vi +IFNlcnZpY2VzIExMQwIJAJa6SNnlXhpnMAkGBSsOAwIaBQCgXTAYBgkqhkiG9w0B +CQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xNjA0MDgwMzAxNDRaMCMG +CSqGSIb3DQEJBDEWBBTuUc28eBXmImAautC+wOjqcFCBVjAJBgcqhkjOOAQDBC8w +LQIVAKA54NxGHWWCz5InboDmY/GHs33nAhQ6O/ZI86NwjA9Vz3RNMUJrUPU5tAAA +AAAAAA== +-----END PKCS7----- +-----BEGIN CERTIFICATE----- +MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw +FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD +VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z +ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u +IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl +cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e +ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3 +VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P +hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j +k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U +hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF 
+lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf +MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW +MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw +vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw +7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K +-----END CERTIFICATE-----` + +func TestDSASignWithOpenSSLAndVerify(t *testing.T) { + content := []byte(` +A ship in port is safe, +but that's not what ships are built for. +-- Grace Hopper`) + // write the content to a temp file + tmpContentFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_content") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpContentFile.Name(), content, 0755) + + // write the signer cert to a temp file + tmpSignerCertFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signer") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpSignerCertFile.Name(), dsaPublicCert, 0755) + + // write the signer key to a temp file + tmpSignerKeyFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_key") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpSignerKeyFile.Name(), dsaPrivateKey, 0755) + + tmpSignedFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signature") + if err != nil { + t.Fatal(err) + } + // call openssl to sign the content + opensslCMD := exec.Command("openssl", "smime", "-sign", "-nodetach", "-md", "sha1", + "-in", tmpContentFile.Name(), "-out", tmpSignedFile.Name(), + "-signer", tmpSignerCertFile.Name(), "-inkey", tmpSignerKeyFile.Name(), + "-certfile", tmpSignerCertFile.Name(), "-outform", "PEM") + out, err := opensslCMD.CombinedOutput() + if err != nil { + t.Fatalf("openssl command failed with %s: %s", err, out) + } + + // verify the signed content + pemSignature, err := ioutil.ReadFile(tmpSignedFile.Name()) + if err != nil { + t.Fatal(err) + } + fmt.Printf("%s\n", pemSignature) + derBlock, _ := pem.Decode(pemSignature) + if derBlock == nil { + t.Fatalf("failed to read DER block from signature PEM %s", tmpSignedFile.Name()) + } + p7, err := Parse(derBlock.Bytes) + if err != nil { + t.Fatalf("Parse encountered unexpected error: %v", err) + } + if err := p7.Verify(); err != nil { + t.Fatalf("Verify failed with error: %v", err) + } + os.Remove(tmpSignerCertFile.Name()) // clean up + os.Remove(tmpSignerKeyFile.Name()) // clean up + os.Remove(tmpContentFile.Name()) // clean up +} + +var dsaPrivateKey = []byte(`-----BEGIN PRIVATE KEY----- +MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9KnC7s5Of2EbdS +PO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00b/JmYLdrmVCl +pJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNaFpEy9nXzrith +1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA9+GghdabPd7L +vKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJFnEj6EwoFhO3 +zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7zKTxvqhRkImo +g9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoEFgIUfW4aPdQBn9gJZp2KuNpzgHzvfsE= +-----END PRIVATE KEY-----`) + +var dsaPublicCert = []byte(`-----BEGIN CERTIFICATE----- +MIIDOjCCAvWgAwIBAgIEPCY/UDANBglghkgBZQMEAwIFADBsMRAwDgYDVQQGEwdV +bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD +VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRAwDgYDVQQDEwdVbmtub3du +MB4XDTE4MTAyMjEzNDMwN1oXDTQ2MDMwOTEzNDMwN1owbDEQMA4GA1UEBhMHVW5r +bm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UE +ChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMHVW5rbm93bjCC +AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD 
+Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gE +exAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii +Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4 +V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozI +puE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl +nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDCriMPbEVBoRK4SOUeFwg7+VRf4TTp +rcOQC9IVVoCjXzuWEGrp3ZI7YWJSpFnSch4lk29RH8O0HpI/NOzKnOBtnKr782pt +1k/bJVMH9EaLd6MKnAVjrCDMYBB0MhebZ8QHY2elZZCWoqDYAcIDOsEx+m4NLErT +ypPnjS5M0jm1PKMhMB8wHQYDVR0OBBYEFC0Yt5XdM0Kc95IX8NQ8XRssGPx7MA0G +CWCGSAFlAwQDAgUAAzAAMC0CFQCIgQtrZZ9hdZG1ROhR5hc8nYEmbgIUAIlgC688 +qzy/7yePTlhlpj+ahMM= +-----END CERTIFICATE-----`) + +type DSATestFixture struct { + Input []byte + Certificate *x509.Certificate +} + +func UnmarshalDSATestFixture(testPEMBlock string) DSATestFixture { + var result DSATestFixture + var derBlock *pem.Block + var pemBlock = []byte(testPEMBlock) + for { + derBlock, pemBlock = pem.Decode(pemBlock) + if derBlock == nil { + break + } + switch derBlock.Type { + case "PKCS7": + result.Input = derBlock.Bytes + case "CERTIFICATE": + result.Certificate, _ = x509.ParseCertificate(derBlock.Bytes) + } + } + + return result +} diff --git a/vendor/github.com/digitorus/timestamp/LICENSE b/vendor/github.com/digitorus/timestamp/LICENSE new file mode 100644 index 00000000000..dac8634ce7b --- /dev/null +++ b/vendor/github.com/digitorus/timestamp/LICENSE @@ -0,0 +1,25 @@ +BSD 2-Clause License + +Copyright (c) 2017, Digitorus B.V. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/digitorus/timestamp/README.md b/vendor/github.com/digitorus/timestamp/README.md new file mode 100644 index 00000000000..d475e03f993 --- /dev/null +++ b/vendor/github.com/digitorus/timestamp/README.md @@ -0,0 +1,13 @@ +# RFC3161 Time-Stamp Protocol (TSP) package for Go + +[![Build & Test](https://github.com/digitorus/timestamp/workflows/Build%20&%20Test/badge.svg)](https://github.com/digitorus/timestamp/actions?query=workflow%3Abuild-and-test) +[![golangci-lint](https://github.com/digitorus/timestamp/workflows/golangci-lint/badge.svg)](https://github.com/digitorus/timestamp/actions?query=workflow%3Agolangci-lint) +[![CodeQL](https://github.com/digitorus/timestamp/workflows/CodeQL/badge.svg)](https://github.com/digitorus/timestamp/actions?query=workflow%3Acodeql) +[![Go Report Card](https://goreportcard.com/badge/github.com/digitorus/timestamp)](https://goreportcard.com/report/github.com/digitorus/timestamp) +[![Coverage Status](https://codecov.io/gh/digitorus/timestamp/branch/master/graph/badge.svg)](https://codecov.io/gh/digitorus/timestamp) +[![Go Reference](https://pkg.go.dev/badge/github.com/digitorus/timestamp.svg)](https://pkg.go.dev/github.com/digitorus/timestamp) + +Time-Stamp Protocol (TSP) package for Go + +#### General +The package timestamp implements the Time-Stamp Protocol (TSP) as specified in RFC3161 (Internet X.509 Public Key Infrastructure Time-Stamp Protocol (TSP)). diff --git a/vendor/github.com/digitorus/timestamp/borrowed.go b/vendor/github.com/digitorus/timestamp/borrowed.go new file mode 100644 index 00000000000..b379b7167f6 --- /dev/null +++ b/vendor/github.com/digitorus/timestamp/borrowed.go @@ -0,0 +1,56 @@ +package timestamp + +import ( + "crypto" + "encoding/asn1" +) + +// TODO(vanbroup): taken from "golang.org/x/crypto/ocsp" +// use directly from crypto/x509 when exported as suggested below. + +var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ + crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), + crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), + crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), + crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), +} + +// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form. +func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash { + for hash, oid := range hashOIDs { + if oid.Equal(target) { + return hash + } + } + return crypto.Hash(0) +} + +func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { + for hash, oid := range hashOIDs { + if hash == target { + return oid + } + } + return nil +} + +// TODO(vanbroup): taken from golang.org/x/crypto/x509 +// asn1BitLength returns the bit-length of bitString by considering the +// most-significant bit in a byte to be the "first" bit. This convention +// matches ASN.1, but differs from almost everything else. 
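+//
+// Illustrative examples (not from the original source): asn1BitLength([]byte{0x80})
+// returns 1, because only the most-significant bit of the byte is set, while
+// asn1BitLength([]byte{0x01}) returns 8, because the last set bit is the
+// eighth bit in this numbering.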
+func asn1BitLength(bitString []byte) int { + bitLen := len(bitString) * 8 + + for i := range bitString { + b := bitString[len(bitString)-i-1] + + for bit := uint(0); bit < 8; bit++ { + if (b>>bit)&1 == 1 { + return bitLen + } + bitLen-- + } + } + + return 0 +} diff --git a/vendor/github.com/digitorus/timestamp/rfc3161_struct.go b/vendor/github.com/digitorus/timestamp/rfc3161_struct.go new file mode 100644 index 00000000000..c5692253c53 --- /dev/null +++ b/vendor/github.com/digitorus/timestamp/rfc3161_struct.go @@ -0,0 +1,75 @@ +package timestamp + +import ( + "crypto/x509/pkix" + "encoding/asn1" + "math/big" + "time" +) + +// http://www.ietf.org/rfc/rfc3161.txt +// 2.4.1. Request Format +type request struct { + Version int + MessageImprint messageImprint + ReqPolicy asn1.ObjectIdentifier `asn1:"optional"` + Nonce *big.Int `asn1:"optional"` + CertReq bool `asn1:"optional,default:false"` + Extensions []pkix.Extension `asn1:"tag:0,optional"` +} + +type messageImprint struct { + HashAlgorithm pkix.AlgorithmIdentifier + HashedMessage []byte +} + +// 2.4.2. Response Format +type response struct { + Status pkiStatusInfo + TimeStampToken asn1.RawValue `asn1:"optional"` +} + +type pkiStatusInfo struct { + Status Status + StatusString []string `asn1:"optional,utf8"` + FailInfo asn1.BitString `asn1:"optional"` +} + +func (s pkiStatusInfo) FailureInfo() FailureInfo { + fi := []FailureInfo{BadAlgorithm, BadRequest, BadDataFormat, TimeNotAvailable, + UnacceptedPolicy, UnacceptedExtension, AddInfoNotAvailable, SystemFailure} + + for _, f := range fi { + if s.FailInfo.At(int(f)) != 0 { + return f + } + } + + return UnknownFailureInfo +} + +// eContent within SignedData is TSTInfo +type tstInfo struct { + Version int + Policy asn1.ObjectIdentifier + MessageImprint messageImprint + SerialNumber *big.Int + Time time.Time `asn1:"generalized"` + Accuracy accuracy `asn1:"optional"` + Ordering bool `asn1:"optional,default:false"` + Nonce *big.Int `asn1:"optional"` + TSA asn1.RawValue `asn1:"tag:0,optional"` + Extensions []pkix.Extension `asn1:"tag:1,optional"` +} + +// accuracy within TSTInfo +type accuracy struct { + Seconds int64 `asn1:"optional"` + Milliseconds int64 `asn1:"tag:0,optional"` + Microseconds int64 `asn1:"tag:1,optional"` +} + +type qcStatement struct { + StatementID asn1.ObjectIdentifier + StatementInfo asn1.RawValue `asn1:"optional"` +} diff --git a/vendor/github.com/digitorus/timestamp/signing_cert_v2_struct.go b/vendor/github.com/digitorus/timestamp/signing_cert_v2_struct.go new file mode 100644 index 00000000000..0507c56e511 --- /dev/null +++ b/vendor/github.com/digitorus/timestamp/signing_cert_v2_struct.go @@ -0,0 +1,26 @@ +package timestamp + +import ( + "crypto/x509/pkix" + "encoding/asn1" + "math/big" +) + +type issuerAndSerial struct { + IssuerName generalNames + SerialNumber *big.Int +} + +type generalNames struct { + Name asn1.RawValue `asn1:"optional,tag:4"` +} + +type essCertIDv2 struct { + HashAlgorithm pkix.AlgorithmIdentifier `asn1:"optional"` // default sha256 + CertHash []byte + IssuerSerial issuerAndSerial `asn1:"optional"` +} + +type signingCertificateV2 struct { + Certs []essCertIDv2 +} diff --git a/vendor/github.com/digitorus/timestamp/timestamp.go b/vendor/github.com/digitorus/timestamp/timestamp.go new file mode 100644 index 00000000000..e4f26903e5f --- /dev/null +++ b/vendor/github.com/digitorus/timestamp/timestamp.go @@ -0,0 +1,680 @@ +// Package timestamp implements the Time-Stamp Protocol (TSP) as specified in +// RFC3161 (Internet X.509 Public Key Infrastructure 
Time-Stamp Protocol (TSP)).
+package timestamp
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"fmt"
+	"io"
+	"math/big"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/digitorus/pkcs7"
+)
+
+// FailureInfo contains the failure details of a Time-Stamp request. See
+// https://tools.ietf.org/html/rfc3161#section-2.4.2
+type FailureInfo int
+
+const (
+	// UnknownFailureInfo means that no known failure info was provided
+	UnknownFailureInfo FailureInfo = -1
+	// BadAlgorithm defines an unrecognized or unsupported Algorithm Identifier
+	BadAlgorithm FailureInfo = 0
+	// BadRequest indicates that the transaction is not permitted or supported
+	BadRequest FailureInfo = 2
+	// BadDataFormat means that the data submitted has the wrong format
+	BadDataFormat FailureInfo = 5
+	// TimeNotAvailable indicates that the TSA's time source is not available
+	TimeNotAvailable FailureInfo = 14
+	// UnacceptedPolicy indicates that the requested TSA policy is not supported
+	// by the TSA
+	UnacceptedPolicy FailureInfo = 15
+	// UnacceptedExtension indicates that the requested extension is not supported
+	// by the TSA
+	UnacceptedExtension FailureInfo = 16
+	// AddInfoNotAvailable means that the information requested could not be
+	// understood or is not available
+	AddInfoNotAvailable FailureInfo = 17
+	// SystemFailure indicates that the request cannot be handled due to system
+	// failure
+	SystemFailure FailureInfo = 25
+)
+
+func (f FailureInfo) String() string {
+	switch f {
+	case BadAlgorithm:
+		return "unrecognized or unsupported Algorithm Identifier"
+	case BadRequest:
+		return "transaction not permitted or supported"
+	case BadDataFormat:
+		return "the data submitted has the wrong format"
+	case TimeNotAvailable:
+		return "the TSA's time source is not available"
+	case UnacceptedPolicy:
+		return "the requested TSA policy is not supported by the TSA"
+	case UnacceptedExtension:
+		return "the requested extension is not supported by the TSA"
+	case AddInfoNotAvailable:
+		return "the additional information requested could not be understood or is not available"
+	case SystemFailure:
+		return "the request cannot be handled due to system failure"
+	default:
+		return "unknown failure"
+	}
+}
+
+// Status contains the status of a Time-Stamp request. See
+// https://tools.ietf.org/html/rfc3161#section-2.4.2
+type Status int
+
+const (
+	// Granted PKIStatus contains the value zero; a TimeStampToken, as requested,
+	// is present.
+	Granted Status = 0
+	// GrantedWithMods PKIStatus contains the value one; a TimeStampToken, with
+	// modifications, is present.
+	GrantedWithMods Status = 1
+	// Rejection PKIStatus
+	Rejection Status = 2
+	// Waiting PKIStatus
+	Waiting Status = 3
+	// RevocationWarning PKIStatus
+	RevocationWarning Status = 4
+	// RevocationNotification PKIStatus
+	RevocationNotification Status = 5
+)
+
+func (s Status) String() string {
+	switch s {
+	case Granted:
+		return "the request is granted"
+	case GrantedWithMods:
+		return "the request is granted with modifications"
+	case Rejection:
+		return "the request is rejected"
+	case Waiting:
+		return "the request is waiting"
+	case RevocationWarning:
+		return "revocation is imminent"
+	case RevocationNotification:
+		return "revocation has occurred"
+	default:
+		return "unknown status: " + strconv.Itoa(int(s))
+	}
+}
+
+// ParseError results from an invalid Time-Stamp request or response.
+type ParseError string
+
+func (p ParseError) Error() string {
+	return string(p)
+}
+
+// Request represents a Time-Stamp request. See
+// https://tools.ietf.org/html/rfc3161#section-2.4.1
+type Request struct {
+	HashAlgorithm crypto.Hash
+	HashedMessage []byte
+
+	// Certificates indicates if the TSA needs to return the signing certificate
+	// and optionally any other certificates of the chain as part of the response.
+	Certificates bool
+
+	// The TSAPolicyOID field, if provided, indicates the TSA policy under
+	// which the TimeStampToken SHOULD be provided
+	TSAPolicyOID asn1.ObjectIdentifier
+
+	// The nonce, if provided, allows the client to verify the timeliness of
+	// the response.
+	Nonce *big.Int
+
+	// Extensions contains raw X.509 extensions from the Extensions field of the
+	// Time-Stamp request. When parsing requests, this can be used to extract
+	// non-critical extensions that are not parsed by this package. When
+	// marshaling Time-Stamp requests, the Extensions field is ignored, see
+	// ExtraExtensions.
+	Extensions []pkix.Extension
+
+	// ExtraExtensions contains extensions to be copied, raw, into any marshaled
+	// Time-Stamp request. Values override any extensions that would otherwise
+	// be produced based on the other fields. The ExtraExtensions field is not
+	// populated when parsing Time-Stamp requests, see Extensions.
+	ExtraExtensions []pkix.Extension
+}
+
+// ParseRequest parses a timestamp request in DER form.
+func ParseRequest(bytes []byte) (*Request, error) {
+	var err error
+	var rest []byte
+	var req request
+
+	if rest, err = asn1.Unmarshal(bytes, &req); err != nil {
+		return nil, err
+	}
+	if len(rest) > 0 {
+		return nil, ParseError("trailing data in Time-Stamp request")
+	}
+
+	if len(req.MessageImprint.HashedMessage) == 0 {
+		return nil, ParseError("Time-Stamp request contains no hashed message")
+	}
+
+	hashFunc := getHashAlgorithmFromOID(req.MessageImprint.HashAlgorithm.Algorithm)
+	if hashFunc == crypto.Hash(0) {
+		return nil, ParseError("Time-Stamp request uses unknown hash function")
+	}
+
+	return &Request{
+		HashAlgorithm: hashFunc,
+		HashedMessage: req.MessageImprint.HashedMessage,
+		Certificates:  req.CertReq,
+		Nonce:         req.Nonce,
+		TSAPolicyOID:  req.ReqPolicy,
+		Extensions:    req.Extensions,
+	}, nil
+}
+
+// Marshal marshals the Time-Stamp request to ASN.1 DER encoded form.
+func (req *Request) Marshal() ([]byte, error) {
+	request := request{
+		Version: 1,
+		MessageImprint: messageImprint{
+			HashAlgorithm: pkix.AlgorithmIdentifier{
+				Algorithm: getOIDFromHashAlgorithm(req.HashAlgorithm),
+				Parameters: asn1.RawValue{
+					Tag: 5, /* ASN.1 NULL */
+				},
+			},
+			HashedMessage: req.HashedMessage,
+		},
+		CertReq:    req.Certificates,
+		Extensions: req.ExtraExtensions,
+	}
+
+	if req.TSAPolicyOID != nil {
+		request.ReqPolicy = req.TSAPolicyOID
+	}
+	if req.Nonce != nil {
+		request.Nonce = req.Nonce
+	}
+	reqBytes, err := asn1.Marshal(request)
+	if err != nil {
+		return nil, err
+	}
+	return reqBytes, nil
+}
+
+// Timestamp represents a Time-Stamp. See:
+// https://tools.ietf.org/html/rfc3161#section-2.4.1
+type Timestamp struct {
+	// Timestamp token part of raw ASN.1 DER content.
+	RawToken []byte
+
+	HashAlgorithm crypto.Hash
+	HashedMessage []byte
+
+	Time         time.Time
+	Accuracy     time.Duration
+	SerialNumber *big.Int
+	Policy       asn1.ObjectIdentifier
+	Ordering     bool
+	Nonce        *big.Int
+	Qualified    bool
+
+	Certificates []*x509.Certificate
+
+	// If set to true, includes TSA certificate in timestamp response
+	AddTSACertificate bool
+
+	// Extensions contains raw X.509 extensions from the Extensions field of the
+	// Time-Stamp. When parsing time-stamps, this can be used to extract
+	// non-critical extensions that are not parsed by this package. When
+	// marshaling time-stamps, the Extensions field is ignored, see
+	// ExtraExtensions.
+	Extensions []pkix.Extension
+
+	// ExtraExtensions contains extensions to be copied, raw, into any marshaled
+	// Time-Stamp response. Values override any extensions that would otherwise
+	// be produced based on the other fields. The ExtraExtensions field is not
+	// populated when parsing Time-Stamp responses, see Extensions.
+	ExtraExtensions []pkix.Extension
+}
+
+// ParseResponse parses a Time-Stamp response in DER form containing a
+// TimeStampToken.
+//
+// Invalid signatures or parse failures will result in a ParseError. Error
+// responses will result in an error describing the returned PKI status.
+func ParseResponse(bytes []byte) (*Timestamp, error) {
+	var err error
+	var rest []byte
+	var resp response
+
+	if rest, err = asn1.Unmarshal(bytes, &resp); err != nil {
+		return nil, err
+	}
+	if len(rest) > 0 {
+		return nil, ParseError("trailing data in Time-Stamp response")
+	}
+
+	if resp.Status.Status > 0 {
+		var fis string
+		fi := resp.Status.FailureInfo()
+		if fi != UnknownFailureInfo {
+			fis = fi.String()
+		}
+		return nil, fmt.Errorf("%s: %s (%v)",
+			resp.Status.Status.String(),
+			strings.Join(resp.Status.StatusString, ","),
+			fis)
+	}
+
+	if len(resp.TimeStampToken.Bytes) == 0 {
+		return nil, ParseError("no pkcs7 data in Time-Stamp response")
+	}
+
+	return Parse(resp.TimeStampToken.FullBytes)
+}
+
+// Parse parses a Time-Stamp in DER form. If the time-stamp contains a
+// certificate then the signature over the response is checked.
+//
+// Invalid signatures or parse failures will result in a ParseError. Error
+// responses will result in an error describing the returned PKI status.
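+//
+// A minimal parsing sketch (tokenDER is an assumed DER-encoded TimeStampToken):
+//
+//	ts, err := timestamp.Parse(tokenDER)
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(ts.Time, ts.HashAlgorithm, ts.SerialNumber)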
+func Parse(bytes []byte) (*Timestamp, error) {
+	var addTSACertificate bool
+	p7, err := pkcs7.Parse(bytes)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(p7.Certificates) > 0 {
+		if err = p7.Verify(); err != nil {
+			return nil, err
+		}
+		addTSACertificate = true
+	} else {
+		addTSACertificate = false
+	}
+
+	var inf tstInfo
+	if _, err = asn1.Unmarshal(p7.Content, &inf); err != nil {
+		return nil, err
+	}
+
+	if len(inf.MessageImprint.HashedMessage) == 0 {
+		return nil, ParseError("Time-Stamp response contains no hashed message")
+	}
+
+	ret := &Timestamp{
+		RawToken:      bytes,
+		HashedMessage: inf.MessageImprint.HashedMessage,
+		Time:          inf.Time,
+		Accuracy: time.Duration((time.Second * time.Duration(inf.Accuracy.Seconds)) +
+			(time.Millisecond * time.Duration(inf.Accuracy.Milliseconds)) +
+			(time.Microsecond * time.Duration(inf.Accuracy.Microseconds))),
+		SerialNumber:      inf.SerialNumber,
+		Policy:            inf.Policy,
+		Ordering:          inf.Ordering,
+		Nonce:             inf.Nonce,
+		Certificates:      p7.Certificates,
+		AddTSACertificate: addTSACertificate,
+		Extensions:        inf.Extensions,
+	}
+
+	ret.HashAlgorithm = getHashAlgorithmFromOID(inf.MessageImprint.HashAlgorithm.Algorithm)
+	if ret.HashAlgorithm == crypto.Hash(0) {
+		return nil, ParseError("Time-Stamp response uses unknown hash function")
+	}
+
+	if oidInExtensions(asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 3}, inf.Extensions) {
+		ret.Qualified = true
+	}
+	return ret, nil
+}
+
+// RequestOptions contains options for constructing timestamp requests.
+type RequestOptions struct {
+	// Hash contains the hash function that should be used when
+	// constructing the timestamp request. If zero, SHA-256 will be used.
+	Hash crypto.Hash
+
+	// Certificates sets Request.Certificates
+	Certificates bool
+
+	// The TSAPolicyOID field, if provided, indicates the TSA policy under
+	// which the TimeStampToken SHOULD be provided
+	TSAPolicyOID asn1.ObjectIdentifier
+
+	// The nonce, if provided, allows the client to verify the timeliness of
+	// the response.
+	Nonce *big.Int
+}
+
+func (opts *RequestOptions) hash() crypto.Hash {
+	if opts == nil || opts.Hash == 0 {
+		return crypto.SHA256
+	}
+	return opts.Hash
+}
+
+// CreateRequest returns a DER-encoded Time-Stamp request for the data read
+// from r. If opts is nil then sensible defaults are used.
+func CreateRequest(r io.Reader, opts *RequestOptions) ([]byte, error) {
+	hashFunc := opts.hash()
+
+	if !hashFunc.Available() {
+		return nil, x509.ErrUnsupportedAlgorithm
+	}
+	h := hashFunc.New()
+
+	b := make([]byte, h.Size())
+	for {
+		n, err := r.Read(b)
+		if n > 0 {
+			if _, werr := h.Write(b[:n]); werr != nil {
+				return nil, fmt.Errorf("failed to create hash")
+			}
+		}
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	req := &Request{
+		HashAlgorithm: hashFunc,
+		HashedMessage: h.Sum(nil),
+	}
+	if opts != nil {
+		req.Certificates = opts.Certificates
+	}
+	if opts != nil && opts.TSAPolicyOID != nil {
+		req.TSAPolicyOID = opts.TSAPolicyOID
+	}
+	if opts != nil && opts.Nonce != nil {
+		req.Nonce = opts.Nonce
+	}
+	return req.Marshal()
+}
+
+// CreateResponseWithOpts returns a DER-encoded timestamp response with the specified contents.
+// The fields in the response are populated as follows:
+//
+// The responder cert is used to populate the responder's name field, and the
+// certificate itself is provided alongside the timestamp response signature.
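+//
+// Hypothetical usage in a TSA responder (signingCert, signer and policyOID are
+// assumed to be provisioned elsewhere; req is a parsed Request):
+//
+//	ts := timestamp.Timestamp{
+//		HashAlgorithm: req.HashAlgorithm,
+//		HashedMessage: req.HashedMessage,
+//		Time:          time.Now(),
+//		Policy:        policyOID, // assumed TSA policy OID
+//	}
+//	respDER, err := ts.CreateResponseWithOpts(signingCert, signer, crypto.SHA256)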
+func (t *Timestamp) CreateResponseWithOpts(signingCert *x509.Certificate, priv crypto.Signer, opts crypto.SignerOpts) ([]byte, error) {
+	messageImprint := getMessageImprint(t.HashAlgorithm, t.HashedMessage)
+
+	tsaSerialNumber, err := generateTSASerialNumber()
+	if err != nil {
+		return nil, err
+	}
+	tstInfo, err := t.populateTSTInfo(messageImprint, t.Policy, tsaSerialNumber, signingCert)
+	if err != nil {
+		return nil, err
+	}
+	signature, err := t.generateSignedData(tstInfo, priv, signingCert, opts)
+	if err != nil {
+		return nil, err
+	}
+	timestampRes := response{
+		Status: pkiStatusInfo{
+			Status: Granted,
+		},
+		TimeStampToken: asn1.RawValue{FullBytes: signature},
+	}
+	tspResponseBytes, err := asn1.Marshal(timestampRes)
+	if err != nil {
+		return nil, err
+	}
+	return tspResponseBytes, nil
+}
+
+// CreateResponse returns a DER-encoded timestamp response with the specified contents.
+// The fields in the response are populated as follows:
+//
+// The responder cert is used to populate the responder's name field, and the
+// certificate itself is provided alongside the timestamp response signature.
+//
+// This function is equivalent to CreateResponseWithOpts, using a SHA256 hash.
+//
+// Deprecated: Use CreateResponseWithOpts instead.
+func (t *Timestamp) CreateResponse(signingCert *x509.Certificate, priv crypto.Signer) ([]byte, error) {
+	return t.CreateResponseWithOpts(signingCert, priv, crypto.SHA256)
+}
+
+// CreateErrorResponse creates a response whose status is something other than
+// granted or granted-with-modifications.
+func CreateErrorResponse(pkiStatus Status, pkiFailureInfo FailureInfo) ([]byte, error) {
+	var bs asn1.BitString
+	setFlag(&bs, int(pkiFailureInfo))
+
+	timestampRes := response{
+		Status: pkiStatusInfo{
+			Status:   pkiStatus,
+			FailInfo: bs,
+		},
+	}
+	tspResponseBytes, err := asn1.Marshal(timestampRes)
+	if err != nil {
+		return nil, err
+	}
+	return tspResponseBytes, nil
+}
+
+func setFlag(bs *asn1.BitString, i int) {
+	for l := len(bs.Bytes); l < 4; l++ {
+		(*bs).Bytes = append((*bs).Bytes, byte(0))
+		(*bs).BitLength = len((*bs).Bytes) * 8
+	}
+	b := i / 8
+	p := uint(7 - (i - 8*b))
+	(*bs).Bytes[b] = (*bs).Bytes[b] | (1 << p)
+	bs.BitLength = asn1BitLength(bs.Bytes)
+	bs.Bytes = bs.Bytes[0 : (bs.BitLength/8)+1]
+}
+
+func getMessageImprint(hashAlgorithm crypto.Hash, hashedMessage []byte) messageImprint {
+	messageImprint := messageImprint{
+		HashAlgorithm: pkix.AlgorithmIdentifier{
+			Algorithm:  getOIDFromHashAlgorithm(hashAlgorithm),
+			Parameters: asn1.NullRawValue,
+		},
+		HashedMessage: hashedMessage,
+	}
+	return messageImprint
+}
+
+func generateTSASerialNumber() (*big.Int, error) {
+	randomBytes := make([]byte, 20)
+	_, err := rand.Read(randomBytes)
+	if err != nil {
+		return nil, err
+	}
+	serialNumber := big.NewInt(0)
+	serialNumber = serialNumber.SetBytes(randomBytes)
+	return serialNumber, nil
+}
+
+func (t *Timestamp) populateTSTInfo(messageImprint messageImprint, policyOID asn1.ObjectIdentifier, tsaSerialNumber *big.Int, tsaCert *x509.Certificate) ([]byte, error) {
+	dirGeneralName, err := asn1.Marshal(asn1.RawValue{Tag: 4, Class: 2, IsCompound: true, Bytes: tsaCert.RawSubject})
+	if err != nil {
+		return nil, err
+	}
+	tstInfo := tstInfo{
+		Version:        1,
+		Policy:         policyOID,
+		MessageImprint: messageImprint,
+		SerialNumber:   tsaSerialNumber,
+		Time:           t.Time,
+		TSA:            asn1.RawValue{Tag: 0, Class: 2, IsCompound: true, Bytes: dirGeneralName},
+		Ordering:       t.Ordering,
+	}
+	if t.Nonce != nil {
+		tstInfo.Nonce = t.Nonce
+	}
+	if t.Accuracy != 0 {
+		if t.Accuracy <
time.Microsecond { + // Round up to 1 microsecond if accuracy is lower than 1 microsecond but greater than 0 nanosecond + tstInfo.Accuracy.Microseconds = 1 + } else { + seconds := t.Accuracy.Truncate(time.Second) + tstInfo.Accuracy.Seconds = int64(seconds.Seconds()) + ms := (t.Accuracy - seconds).Truncate(time.Millisecond) + if ms != 0 { + tstInfo.Accuracy.Milliseconds = int64(ms.Milliseconds()) + } + microSeconds := (t.Accuracy - seconds - ms).Truncate(time.Microsecond) + if microSeconds != 0 { + tstInfo.Accuracy.Microseconds = int64(microSeconds.Microseconds()) + } + } + } + if len(t.ExtraExtensions) != 0 { + tstInfo.Extensions = t.ExtraExtensions + } + if t.Qualified && !oidInExtensions(asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 3}, t.ExtraExtensions) { + qcStatements := []qcStatement{{ + StatementID: asn1.ObjectIdentifier{0, 4, 0, 19422, 1, 1}, + }} + asn1QcStats, err := asn1.Marshal(qcStatements) + if err != nil { + return nil, err + } + tstInfo.Extensions = append(tstInfo.Extensions, pkix.Extension{ + Id: []int{1, 3, 6, 1, 5, 5, 7, 1, 3}, + Value: asn1QcStats, + Critical: false, + }) + } + tstInfoBytes, err := asn1.Marshal(tstInfo) + if err != nil { + return nil, err + } + return tstInfoBytes, nil +} + +func (t *Timestamp) populateSigningCertificateV2Ext(certificate *x509.Certificate) ([]byte, error) { + if !t.HashAlgorithm.Available() { + return nil, x509.ErrUnsupportedAlgorithm + } + if t.HashAlgorithm.HashFunc() == crypto.SHA1 { + return nil, fmt.Errorf("for SHA1 use ESSCertID instead of ESSCertIDv2") + } + + h := t.HashAlgorithm.HashFunc().New() + _, err := h.Write(certificate.Raw) + if err != nil { + return nil, fmt.Errorf("failed to create hash") + } + + var hashAlg pkix.AlgorithmIdentifier + + // HashAlgorithm defaults to SHA256 + if t.HashAlgorithm.HashFunc() != crypto.SHA256 { + hashAlg = pkix.AlgorithmIdentifier{ + Algorithm: hashOIDs[t.HashAlgorithm.HashFunc()], + Parameters: asn1.NullRawValue, + } + } + + signingCertificateV2 := signingCertificateV2{ + Certs: []essCertIDv2{{ + HashAlgorithm: hashAlg, + CertHash: h.Sum(nil), + IssuerSerial: issuerAndSerial{ + IssuerName: generalNames{ + Name: asn1.RawValue{Tag: 4, Class: 2, IsCompound: true, Bytes: certificate.RawIssuer}, + }, + SerialNumber: certificate.SerialNumber, + }, + }}, + } + signingCertV2Bytes, err := asn1.Marshal(signingCertificateV2) + if err != nil { + return nil, err + } + return signingCertV2Bytes, nil +} + +// digestAlgorithmToOID converts the hash func to the corresponding OID. +// This should have parity with [pkcs7.getHashForOID]. 
+func digestAlgorithmToOID(hash crypto.Hash) (asn1.ObjectIdentifier, error) {
+	switch hash {
+	case crypto.SHA1:
+		return pkcs7.OIDDigestAlgorithmSHA1, nil
+	case crypto.SHA256:
+		return pkcs7.OIDDigestAlgorithmSHA256, nil
+	case crypto.SHA384:
+		return pkcs7.OIDDigestAlgorithmSHA384, nil
+	case crypto.SHA512:
+		return pkcs7.OIDDigestAlgorithmSHA512, nil
+	}
+	return nil, pkcs7.ErrUnsupportedAlgorithm
+}
+
+func (t *Timestamp) generateSignedData(tstInfo []byte, signer crypto.Signer, certificate *x509.Certificate, opts crypto.SignerOpts) ([]byte, error) {
+	signedData, err := pkcs7.NewSignedData(tstInfo)
+	if err != nil {
+		return nil, err
+	}
+
+	alg, err := digestAlgorithmToOID(opts.HashFunc())
+	if err != nil {
+		return nil, err
+	}
+	signedData.SetDigestAlgorithm(alg)
+	signedData.SetContentType(asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 16, 1, 4})
+	signedData.GetSignedData().Version = 3
+
+	signingCertV2Bytes, err := t.populateSigningCertificateV2Ext(certificate)
+	if err != nil {
+		return nil, err
+	}
+
+	signerInfoConfig := pkcs7.SignerInfoConfig{
+		ExtraSignedAttributes: []pkcs7.Attribute{
+			{
+				Type:  asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 16, 2, 47},
+				Value: asn1.RawValue{FullBytes: signingCertV2Bytes},
+			},
+		},
+	}
+	if !t.AddTSACertificate {
+		signerInfoConfig.SkipCertificates = true
+	}
+
+	if len(t.Certificates) > 0 {
+		err = signedData.AddSignerChain(certificate, signer, t.Certificates, signerInfoConfig)
+	} else {
+		err = signedData.AddSigner(certificate, signer, signerInfoConfig)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	signature, err := signedData.Finish()
+	if err != nil {
+		return nil, err
+	}
+	return signature, nil
+}
+
+// copied from crypto/x509 package
+// oidInExtensions reports whether an extension with the given oid exists in
+// extensions.
+func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool { + for _, e := range extensions { + if e.Id.Equal(oid) { + return true + } + } + return false +} diff --git a/vendor/github.com/go-openapi/analysis/.codecov.yml b/vendor/github.com/go-openapi/analysis/.codecov.yml new file mode 100644 index 00000000000..841c4281e23 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.codecov.yml @@ -0,0 +1,5 @@ +coverage: + status: + patch: + default: + target: 80% diff --git a/vendor/github.com/go-openapi/analysis/.gitattributes b/vendor/github.com/go-openapi/analysis/.gitattributes new file mode 100644 index 00000000000..d020be8ea4e --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.gitattributes @@ -0,0 +1,2 @@ +*.go text eol=lf + diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore new file mode 100644 index 00000000000..87c3bd3e66e --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +coverage.txt +*.cov +.idea diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml new file mode 100644 index 00000000000..06190ac055f --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.golangci.yml @@ -0,0 +1,75 @@ +version: "2" +linters: + default: all + disable: + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - godot + - godox + - gosmopolitan + - inamedparam + - intrange + - ireturn + - lll + - musttag + - nestif + - nlreturn + - noinlineerr + - nonamedreturns + - paralleltest + - recvcheck + - testpackage + - thelper + - tparallel + - unparam + - varnamelen + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/go-openapi/analysis/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md new file mode 100644 index 00000000000..e005d4b37b7 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/README.md @@ -0,0 +1,27 @@ +# OpenAPI analysis [![Build Status](https://github.com/go-openapi/analysis/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/analysis/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/analysis.svg)](https://pkg.go.dev/github.com/go-openapi/analysis) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/analysis)](https://goreportcard.com/report/github.com/go-openapi/analysis) + + +A foundational library to analyze an OAI specification document for easier reasoning about the content. + +## What's inside? + +* An analyzer providing methods to walk the functional content of a specification +* A spec flattener producing a self-contained document bundle, while preserving `$ref`s +* A spec merger ("mixin") to merge several spec documents into a primary spec +* A spec "fixer" ensuring that response descriptions are non empty + +[Documentation](https://pkg.go.dev/github.com/go-openapi/analysis) + +## FAQ + +* Does this library support OpenAPI 3? + +> No. +> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). +> There is no plan to make it evolve toward supporting OpenAPI 3.x. +> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go new file mode 100644 index 00000000000..4870ad07beb --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/analyzer.go @@ -0,0 +1,1054 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package analysis + +import ( + "fmt" + "maps" + slashpath "path" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag/mangling" +) + +const ( + allocLargeMap = 150 + allocMediumMap = 64 + allocSmallMap = 10 +) + +type referenceAnalysis struct { + schemas map[string]spec.Ref + responses map[string]spec.Ref + parameters map[string]spec.Ref + items map[string]spec.Ref + headerItems map[string]spec.Ref + parameterItems map[string]spec.Ref + allRefs map[string]spec.Ref + pathItems map[string]spec.Ref +} + +func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { + r.allRefs["#"+key] = ref +} + +func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) { + r.items["#"+key] = items.Ref + r.addRef(key, items.Ref) + if location == "header" { + // NOTE: in swagger 2.0, headers and parameters (but not body param schemas) are simple schemas + // and $ref are not supported here. However it is possible to analyze this. 
+ r.headerItems["#"+key] = items.Ref + } else { + r.parameterItems["#"+key] = items.Ref + } +} + +func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { + r.schemas["#"+key] = ref.Schema.Ref + r.addRef(key, ref.Schema.Ref) +} + +func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { + r.responses["#"+key] = resp.Ref + r.addRef(key, resp.Ref) +} + +func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { + r.parameters["#"+key] = param.Ref + r.addRef(key, param.Ref) +} + +func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) { + r.pathItems["#"+key] = pathItem.Ref + r.addRef(key, pathItem.Ref) +} + +type patternAnalysis struct { + parameters map[string]string + headers map[string]string + items map[string]string + schemas map[string]string + allPatterns map[string]string +} + +func (p *patternAnalysis) addPattern(key, pattern string) { + p.allPatterns["#"+key] = pattern +} + +func (p *patternAnalysis) addParameterPattern(key, pattern string) { + p.parameters["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addHeaderPattern(key, pattern string) { + p.headers["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addItemsPattern(key, pattern string) { + p.items["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addSchemaPattern(key, pattern string) { + p.schemas["#"+key] = pattern + p.addPattern(key, pattern) +} + +type enumAnalysis struct { + parameters map[string][]any + headers map[string][]any + items map[string][]any + schemas map[string][]any + allEnums map[string][]any +} + +func (p *enumAnalysis) addEnum(key string, enum []any) { + p.allEnums["#"+key] = enum +} + +func (p *enumAnalysis) addParameterEnum(key string, enum []any) { + p.parameters["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addHeaderEnum(key string, enum []any) { + p.headers["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addItemsEnum(key string, enum []any) { + p.items["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addSchemaEnum(key string, enum []any) { + p.schemas["#"+key] = enum + p.addEnum(key, enum) +} + +// Spec is an analyzed specification object. It takes a swagger spec object and turns it into a registry +// with a bunch of utility methods to act on the information in the spec. +type Spec struct { + spec *spec.Swagger + consumes map[string]struct{} + produces map[string]struct{} + authSchemes map[string]struct{} + operations map[string]map[string]*spec.Operation + references referenceAnalysis + patterns patternAnalysis + enums enumAnalysis + allSchemas map[string]SchemaRef + allOfs map[string]SchemaRef +} + +// New takes a swagger spec object and returns an analyzed spec document. +// The analyzed document contains a number of indices that make it easier to +// reason about semantics of a swagger specification for use in code generation +// or validation etc. 
+func New(doc *spec.Swagger) *Spec { + a := &Spec{ + spec: doc, + references: referenceAnalysis{}, + patterns: patternAnalysis{}, + enums: enumAnalysis{}, + } + a.reset() + a.initialize() + + return a +} + +// SecurityRequirement is a representation of a security requirement for an operation +type SecurityRequirement struct { + Name string + Scopes []string +} + +// SecurityRequirementsFor gets the security requirements for the operation +func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRequirement { + if s.spec.Security == nil && operation.Security == nil { + return nil + } + + schemes := s.spec.Security + if operation.Security != nil { + schemes = operation.Security + } + + result := [][]SecurityRequirement{} + for _, scheme := range schemes { + if len(scheme) == 0 { + // append a zero object for anonymous + result = append(result, []SecurityRequirement{{}}) + + continue + } + + var reqs []SecurityRequirement + for k, v := range scheme { + if v == nil { + v = []string{} + } + reqs = append(reqs, SecurityRequirement{Name: k, Scopes: v}) + } + + result = append(result, reqs) + } + + return result +} + +// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequirement) map[string]spec.SecurityScheme { + result := make(map[string]spec.SecurityScheme) + + for _, v := range requirements { + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + + return result +} + +// SecurityDefinitionsFor gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { + requirements := s.SecurityRequirementsFor(operation) + if len(requirements) == 0 { + return nil + } + + result := make(map[string]spec.SecurityScheme) + for _, reqs := range requirements { + for _, v := range reqs { + if v.Name == "" { + // optional requirement + continue + } + + if _, ok := result[v.Name]; ok { + // duplicate requirement + continue + } + + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + } + + return result +} + +// ConsumesFor gets the mediatypes for the operation +func (s *Spec) ConsumesFor(operation *spec.Operation) []string { + if len(operation.Consumes) == 0 { + cons := make(map[string]struct{}, len(s.spec.Consumes)) + for _, k := range s.spec.Consumes { + cons[k] = struct{}{} + } + + return s.structMapKeys(cons) + } + + cons := make(map[string]struct{}, len(operation.Consumes)) + for _, c := range operation.Consumes { + cons[c] = struct{}{} + } + + return s.structMapKeys(cons) +} + +// ProducesFor gets the mediatypes for the operation +func (s *Spec) ProducesFor(operation *spec.Operation) []string { + if len(operation.Produces) == 0 { + prod := make(map[string]struct{}, len(s.spec.Produces)) + for _, k := range s.spec.Produces { + prod[k] = struct{}{} + } + + return s.structMapKeys(prod) + } + + prod := make(map[string]struct{}, len(operation.Produces)) + for _, c := range operation.Produces { + prod[c] = struct{}{} + } + + return s.structMapKeys(prod) +} + +func mapKeyFromParam(param *spec.Parameter) string { + return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param)) +} + +func fieldNameFromParam(param *spec.Parameter) string { + // TODO: this should be x-go-name + if nm, ok := 
param.Extensions.GetString("go-name"); ok {
+		return nm
+	}
+	mangler := mangling.NewNameMangler()
+
+	return mangler.ToGoName(param.Name)
+}
+
+// ErrorOnParamFunc is a callback function to be invoked
+// whenever an error is encountered while resolving references
+// on parameters.
+//
+// This function takes as input the spec.Parameter which triggered the
+// error and the error itself.
+//
+// If the callback function returns false, the calling function should bail.
+//
+// If it returns true, the calling function should continue evaluating parameters.
+// A nil ErrorOnParamFunc must be evaluated as equivalent to panic().
+type ErrorOnParamFunc func(spec.Parameter, error) bool
+
+// ParametersFor the specified operation id.
+//
+// Assumes parameters properly resolve references if any and that
+// such references actually resolve to a parameter object.
+// Otherwise, panics.
+func (s *Spec) ParametersFor(operationID string) []spec.Parameter {
+	return s.SafeParametersFor(operationID, nil)
+}
+
+// SafeParametersFor the specified operation id.
+//
+// Does not assume parameters properly resolve references or that
+// such references actually resolve to a parameter object.
+//
+// Upon error, invoke an ErrorOnParamFunc callback with the erroneous
+// parameters. If the callback is set to nil, panics upon errors.
+func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter {
+	gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter {
+		bag := make(map[string]spec.Parameter)
+		s.paramsAsMap(pi.Parameters, bag, callmeOnError)
+		s.paramsAsMap(op.Parameters, bag, callmeOnError)
+
+		var res []spec.Parameter
+		for _, v := range bag {
+			res = append(res, v)
+		}
+
+		return res
+	}
+
+	for _, pi := range s.spec.Paths.Paths {
+		if pi.Get != nil && pi.Get.ID == operationID {
+			return gatherParams(&pi, pi.Get) //#nosec
+		}
+		if pi.Head != nil && pi.Head.ID == operationID {
+			return gatherParams(&pi, pi.Head) //#nosec
+		}
+		if pi.Options != nil && pi.Options.ID == operationID {
+			return gatherParams(&pi, pi.Options) //#nosec
+		}
+		if pi.Post != nil && pi.Post.ID == operationID {
+			return gatherParams(&pi, pi.Post) //#nosec
+		}
+		if pi.Patch != nil && pi.Patch.ID == operationID {
+			return gatherParams(&pi, pi.Patch) //#nosec
+		}
+		if pi.Put != nil && pi.Put.ID == operationID {
+			return gatherParams(&pi, pi.Put) //#nosec
+		}
+		if pi.Delete != nil && pi.Delete.ID == operationID {
+			return gatherParams(&pi, pi.Delete) //#nosec
+		}
+	}
+
+	return nil
+}
+
+// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that
+// apply for the method and path.
+//
+// Assumes parameters properly resolve references if any and that
+// such references actually resolve to a parameter object.
+// Otherwise, panics.
+func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter {
+	return s.SafeParamsFor(method, path, nil)
+}
+
+// SafeParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that
+// apply for the method and path.
+//
+// Does not assume parameters properly resolve references or that
+// such references actually resolve to a parameter object.
+//
+// Upon error, invoke an ErrorOnParamFunc callback with the erroneous
+// parameters. If the callback is set to nil, panics upon errors.
+func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter {
+	res := make(map[string]spec.Parameter)
+	if pi, ok := s.spec.Paths.Paths[path]; ok {
+		s.paramsAsMap(pi.Parameters, res, callmeOnError)
+		s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res, callmeOnError)
+	}
+
+	return res
+}
+
+// OperationForName gets the operation for the given id
+func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) {
+	for method, pathItem := range s.operations {
+		for path, op := range pathItem {
+			if operationID == op.ID {
+				return method, path, op, true
+			}
+		}
+	}
+
+	return "", "", nil, false
+}
+
+// OperationFor the given method and path
+func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) {
+	if mp, ok := s.operations[strings.ToUpper(method)]; ok {
+		op, fn := mp[path]
+
+		return op, fn
+	}
+
+	return nil, false
+}
+
+// Operations gathers all the operations specified in the spec document
+func (s *Spec) Operations() map[string]map[string]*spec.Operation {
+	return s.operations
+}
+
+// AllPaths returns all the paths in the swagger spec
+func (s *Spec) AllPaths() map[string]spec.PathItem {
+	if s.spec == nil || s.spec.Paths == nil {
+		return nil
+	}
+
+	return s.spec.Paths.Paths
+}
+
+// OperationIDs gets all the operation ids based on method and path
+func (s *Spec) OperationIDs() []string {
+	if len(s.operations) == 0 {
+		return nil
+	}
+
+	result := make([]string, 0, len(s.operations))
+	for method, v := range s.operations {
+		for p, o := range v {
+			if o.ID != "" {
+				result = append(result, o.ID)
+			} else {
+				result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
+			}
+		}
+	}
+
+	return result
+}
+
+// OperationMethodPaths gets all the operations expressed as "METHOD path" combinations
+func (s *Spec) OperationMethodPaths() []string {
+	if len(s.operations) == 0 {
+		return nil
+	}
+
+	result := make([]string, 0, len(s.operations))
+	for method, v := range s.operations {
+		for p := range v {
+			result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
+		}
+	}
+
+	return result
+}
+
+// RequiredConsumes gets all the distinct consumes that are specified in the specification document
+func (s *Spec) RequiredConsumes() []string {
+	return s.structMapKeys(s.consumes)
+}
+
+// RequiredProduces gets all the distinct produces that are specified in the specification document
+func (s *Spec) RequiredProduces() []string {
+	return s.structMapKeys(s.produces)
+}
+
+// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec
+func (s *Spec) RequiredSecuritySchemes() []string {
+	return s.structMapKeys(s.authSchemes)
+}
+
+// SchemaRef is a reference to a schema
+type SchemaRef struct {
+	Name     string
+	Ref      spec.Ref
+	Schema   *spec.Schema
+	TopLevel bool
+}
+
+// SchemasWithAllOf returns schema references to all schemas that are defined
+// with an allOf key
+func (s *Spec) SchemasWithAllOf() (result []SchemaRef) {
+	for _, v := range s.allOfs {
+		result = append(result, v)
+	}
+
+	return
+}
+
+// AllDefinitions returns schema references for all the definitions that were discovered
+func (s *Spec) AllDefinitions() (result []SchemaRef) {
+	for _, v := range s.allSchemas {
+		result = append(result, v)
+	}
+
+	return
+}
+
+// AllDefinitionReferences returns json refs for all the discovered schemas
+func (s *Spec) AllDefinitionReferences() (result []string) {
+	for _, v := range s.references.schemas {
+		result =
append(result, v.String()) + } + + return +} + +// AllParameterReferences returns json refs for all the discovered parameters +func (s *Spec) AllParameterReferences() (result []string) { + for _, v := range s.references.parameters { + result = append(result, v.String()) + } + + return +} + +// AllResponseReferences returns json refs for all the discovered responses +func (s *Spec) AllResponseReferences() (result []string) { + for _, v := range s.references.responses { + result = append(result, v.String()) + } + + return +} + +// AllPathItemReferences returns the references for all the items +func (s *Spec) AllPathItemReferences() (result []string) { + for _, v := range s.references.pathItems { + result = append(result, v.String()) + } + + return +} + +// AllItemsReferences returns the references for all the items in simple schemas (parameters or headers). +// +// NOTE: since Swagger 2.0 forbids $ref in simple params, this should always yield an empty slice for a valid +// Swagger 2.0 spec. +func (s *Spec) AllItemsReferences() (result []string) { + for _, v := range s.references.items { + result = append(result, v.String()) + } + + return +} + +// AllReferences returns all the references found in the document, with possible duplicates +func (s *Spec) AllReferences() (result []string) { + for _, v := range s.references.allRefs { + result = append(result, v.String()) + } + + return +} + +// AllRefs returns all the unique references found in the document +func (s *Spec) AllRefs() (result []spec.Ref) { + set := make(map[string]struct{}) + for _, v := range s.references.allRefs { + a := v.String() + if a == "" { + continue + } + + if _, ok := set[a]; !ok { + set[a] = struct{}{} + result = append(result, v) + } + } + + return +} + +// ParameterPatterns returns all the patterns found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterPatterns() map[string]string { + return cloneStringMap(s.patterns.parameters) +} + +// HeaderPatterns returns all the patterns found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderPatterns() map[string]string { + return cloneStringMap(s.patterns.headers) +} + +// ItemsPatterns returns all the patterns found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsPatterns() map[string]string { + return cloneStringMap(s.patterns.items) +} + +// SchemaPatterns returns all the patterns found in schemas +// the map is cloned to avoid accidental changes +func (s *Spec) SchemaPatterns() map[string]string { + return cloneStringMap(s.patterns.schemas) +} + +// AllPatterns returns all the patterns found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllPatterns() map[string]string { + return cloneStringMap(s.patterns.allPatterns) +} + +// ParameterEnums returns all the enums found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterEnums() map[string][]any { + return cloneEnumMap(s.enums.parameters) +} + +// HeaderEnums returns all the enums found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderEnums() map[string][]any { + return cloneEnumMap(s.enums.headers) +} + +// ItemsEnums returns all the enums found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsEnums() map[string][]any { + return cloneEnumMap(s.enums.items) +} + +// SchemaEnums returns all the enums found in schemas +// the map is cloned to avoid 
accidental changes +func (s *Spec) SchemaEnums() map[string][]any { + return cloneEnumMap(s.enums.schemas) +} + +// AllEnums returns all the enums found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllEnums() map[string][]any { + return cloneEnumMap(s.enums.allEnums) +} + +func (s *Spec) structMapKeys(mp map[string]struct{}) []string { + if len(mp) == 0 { + return nil + } + + result := make([]string, 0, len(mp)) + for k := range mp { + result = append(result, k) + } + + return result +} + +func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter, callmeOnError ErrorOnParamFunc) { + for _, param := range parameters { + pr := param + if pr.Ref.String() == "" { + res[mapKeyFromParam(&pr)] = pr + + continue + } + + // resolve $ref + if callmeOnError == nil { + callmeOnError = func(_ spec.Parameter, err error) bool { + panic(err) + } + } + + obj, _, err := pr.Ref.GetPointer().Get(s.spec) + if err != nil { + if callmeOnError(param, ErrInvalidRef(pr.Ref.String())) { + continue + } + + break + } + + objAsParam, ok := obj.(spec.Parameter) + if !ok { + if callmeOnError(param, ErrInvalidParameterRef(pr.Ref.String())) { + continue + } + + break + } + + pr = objAsParam + res[mapKeyFromParam(&pr)] = pr + } +} + +func (s *Spec) reset() { + s.consumes = make(map[string]struct{}, allocLargeMap) + s.produces = make(map[string]struct{}, allocLargeMap) + s.authSchemes = make(map[string]struct{}, allocLargeMap) + s.operations = make(map[string]map[string]*spec.Operation, allocLargeMap) + s.allSchemas = make(map[string]SchemaRef, allocLargeMap) + s.allOfs = make(map[string]SchemaRef, allocLargeMap) + s.references.schemas = make(map[string]spec.Ref, allocLargeMap) + s.references.pathItems = make(map[string]spec.Ref, allocLargeMap) + s.references.responses = make(map[string]spec.Ref, allocLargeMap) + s.references.parameters = make(map[string]spec.Ref, allocLargeMap) + s.references.items = make(map[string]spec.Ref, allocLargeMap) + s.references.headerItems = make(map[string]spec.Ref, allocLargeMap) + s.references.parameterItems = make(map[string]spec.Ref, allocLargeMap) + s.references.allRefs = make(map[string]spec.Ref, allocLargeMap) + s.patterns.parameters = make(map[string]string, allocLargeMap) + s.patterns.headers = make(map[string]string, allocLargeMap) + s.patterns.items = make(map[string]string, allocLargeMap) + s.patterns.schemas = make(map[string]string, allocLargeMap) + s.patterns.allPatterns = make(map[string]string, allocLargeMap) + s.enums.parameters = make(map[string][]any, allocLargeMap) + s.enums.headers = make(map[string][]any, allocLargeMap) + s.enums.items = make(map[string][]any, allocLargeMap) + s.enums.schemas = make(map[string][]any, allocLargeMap) + s.enums.allEnums = make(map[string][]any, allocLargeMap) +} + +func (s *Spec) reload() { + s.reset() + s.initialize() +} + +func (s *Spec) initialize() { + for _, c := range s.spec.Consumes { + s.consumes[c] = struct{}{} + } + for _, c := range s.spec.Produces { + s.produces[c] = struct{}{} + } + for _, ss := range s.spec.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + for path, pathItem := range s.AllPaths() { + s.analyzeOperations(path, &pathItem) //#nosec + } + + for name, parameter := range s.spec.Parameters { + refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) + if parameter.Items != nil { + s.analyzeItems("items", parameter.Items, refPref, "parameter") + } + if parameter.In == "body" && parameter.Schema != nil { + 
s.analyzeSchema("schema", parameter.Schema, refPref) + } + if parameter.Pattern != "" { + s.patterns.addParameterPattern(refPref, parameter.Pattern) + } + if len(parameter.Enum) > 0 { + s.enums.addParameterEnum(refPref, parameter.Enum) + } + } + + for name, response := range s.spec.Responses { + refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) + for k, v := range response.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + if v.Items != nil { + s.analyzeItems("items", v.Items, hRefPref, "header") + } + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + if len(v.Enum) > 0 { + s.enums.addHeaderEnum(hRefPref, v.Enum) + } + } + if response.Schema != nil { + s.analyzeSchema("schema", response.Schema, refPref) + } + } + + for name := range s.spec.Definitions { + schema := s.spec.Definitions[name] + s.analyzeSchema(name, &schema, "/definitions") + } + // TODO: after analyzing all things and flattening schemas etc + // resolve all the collected references to their final representations + // best put in a separate method because this could get expensive +} + +func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { + // TODO: resolve refs here? + // Currently, operations declared via pathItem $ref are known only after expansion + op := pi + if pi.Ref.String() != "" { + key := slashpath.Join("/paths", jsonpointer.Escape(path)) + s.references.addPathItemRef(key, pi) + } + s.analyzeOperation("GET", path, op.Get) + s.analyzeOperation("PUT", path, op.Put) + s.analyzeOperation("POST", path, op.Post) + s.analyzeOperation("PATCH", path, op.Patch) + s.analyzeOperation("DELETE", path, op.Delete) + s.analyzeOperation("HEAD", path, op.Head) + s.analyzeOperation("OPTIONS", path, op.Options) + for i, param := range op.Parameters { + refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) + if param.Ref.String() != "" { + s.references.addParamRef(refPref, ¶m) //#nosec + } + if param.Pattern != "" { + s.patterns.addParameterPattern(refPref, param.Pattern) + } + if len(param.Enum) > 0 { + s.enums.addParameterEnum(refPref, param.Enum) + } + if param.Items != nil { + s.analyzeItems("items", param.Items, refPref, "parameter") + } + if param.Schema != nil { + s.analyzeSchema("schema", param.Schema, refPref) + } + } +} + +func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) { + if items == nil { + return + } + refPref := slashpath.Join(prefix, name) + s.analyzeItems(name, items.Items, refPref, location) + if items.Ref.String() != "" { + s.references.addItemsRef(refPref, items, location) + } + if items.Pattern != "" { + s.patterns.addItemsPattern(refPref, items.Pattern) + } + if len(items.Enum) > 0 { + s.enums.addItemsEnum(refPref, items.Enum) + } +} + +func (s *Spec) analyzeParameter(prefix string, i int, param spec.Parameter) { + refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) + if param.Ref.String() != "" { + s.references.addParamRef(refPref, ¶m) //#nosec + } + + if param.Pattern != "" { + s.patterns.addParameterPattern(refPref, param.Pattern) + } + + if len(param.Enum) > 0 { + s.enums.addParameterEnum(refPref, param.Enum) + } + + s.analyzeItems("items", param.Items, refPref, "parameter") + if param.In == "body" && param.Schema != nil { + s.analyzeSchema("schema", param.Schema, refPref) + } +} + +func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { + if op == nil { + return + } + + for _, c := range op.Consumes { + s.consumes[c] = struct{}{} + } + 
+ for _, c := range op.Produces { + s.produces[c] = struct{}{} + } + + for _, ss := range op.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + + if _, ok := s.operations[method]; !ok { + s.operations[method] = make(map[string]*spec.Operation) + } + + s.operations[method][path] = op + prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) + for i, param := range op.Parameters { + s.analyzeParameter(prefix, i, param) + } + + if op.Responses == nil { + return + } + + if op.Responses.Default != nil { + s.analyzeDefaultResponse(prefix, op.Responses.Default) + } + + for k, res := range op.Responses.StatusCodeResponses { + s.analyzeResponse(prefix, k, res) + } +} + +func (s *Spec) analyzeDefaultResponse(prefix string, res *spec.Response) { + refPref := slashpath.Join(prefix, "responses", "default") + if res.Ref.String() != "" { + s.references.addResponseRef(refPref, res) + } + + for k, v := range res.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + } + + if res.Schema != nil { + s.analyzeSchema("schema", res.Schema, refPref) + } +} + +func (s *Spec) analyzeResponse(prefix string, k int, res spec.Response) { + refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) + if res.Ref.String() != "" { + s.references.addResponseRef(refPref, &res) //#nosec + } + + for k, v := range res.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + + if len(v.Enum) > 0 { + s.enums.addHeaderEnum(hRefPref, v.Enum) + } + } + + if res.Schema != nil { + s.analyzeSchema("schema", res.Schema, refPref) + } +} + +func (s *Spec) analyzeSchema(name string, schema *spec.Schema, prefix string) { + refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) + schRef := SchemaRef{ + Name: name, + Schema: schema, + Ref: spec.MustCreateRef("#" + refURI), + TopLevel: prefix == "/definitions", + } + + s.allSchemas["#"+refURI] = schRef + + if schema.Ref.String() != "" { + s.references.addSchemaRef(refURI, schRef) + } + + if schema.Pattern != "" { + s.patterns.addSchemaPattern(refURI, schema.Pattern) + } + + if len(schema.Enum) > 0 { + s.enums.addSchemaEnum(refURI, schema.Enum) + } + + for k, v := range schema.Definitions { + s.analyzeSchema(k, &v, slashpath.Join(refURI, "definitions")) + } + + for k, v := range schema.Properties { + s.analyzeSchema(k, &v, slashpath.Join(refURI, "properties")) + } + + for k, v := range schema.PatternProperties { + // NOTE: swagger 2.0 does not support PatternProperties. + // However it is possible to analyze this in a schema + s.analyzeSchema(k, &v, slashpath.Join(refURI, "patternProperties")) + } + + for i := range schema.AllOf { + v := &schema.AllOf[i] + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) + } + + if len(schema.AllOf) > 0 { + s.allOfs["#"+refURI] = schRef + } + + for i := range schema.AnyOf { + v := &schema.AnyOf[i] + // NOTE: swagger 2.0 does not support anyOf constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) + } + + for i := range schema.OneOf { + v := &schema.OneOf[i] + // NOTE: swagger 2.0 does not support oneOf constructs. 
+ // However it is possible to analyze this in a schema + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) + } + + if schema.Not != nil { + // NOTE: swagger 2.0 does not support "not" constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema("not", schema.Not, refURI) + } + + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + s.analyzeSchema("additionalProperties", schema.AdditionalProperties.Schema, refURI) + } + + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + // NOTE: swagger 2.0 does not support AdditionalItems. + // However it is possible to analyze this in a schema + s.analyzeSchema("additionalItems", schema.AdditionalItems.Schema, refURI) + } + + if schema.Items != nil { + if schema.Items.Schema != nil { + s.analyzeSchema("items", schema.Items.Schema, refURI) + } + + for i := range schema.Items.Schemas { + sch := &schema.Items.Schemas[i] + s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) + } + } +} + +func cloneStringMap(source map[string]string) map[string]string { + res := make(map[string]string, len(source)) + maps.Copy(res, source) + + return res +} + +func cloneEnumMap(source map[string][]any) map[string][]any { + res := make(map[string][]any, len(source)) + maps.Copy(res, source) + + return res +} diff --git a/vendor/github.com/go-openapi/analysis/debug.go b/vendor/github.com/go-openapi/analysis/debug.go new file mode 100644 index 00000000000..d490eab6063 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/debug.go @@ -0,0 +1,12 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package analysis + +import ( + "os" + + "github.com/go-openapi/analysis/internal/debug" +) + +var debugLog = debug.GetLogger("analysis", os.Getenv("SWAGGER_DEBUG") != "") diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go new file mode 100644 index 00000000000..9d41371a9f0 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/doc.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +/* +Package analysis provides methods to work with a Swagger specification document from +package go-openapi/spec. + +## Analyzing a specification + +An analysed specification object (type Spec) provides methods to work with swagger definition. + +## Flattening or expanding a specification + +Flattening a specification bundles all remote $ref in the main spec document. +Depending on flattening options, additional preprocessing may take place: + - full flattening: replacing all inline complex constructs by a named entry in #/definitions + - expand: replace all $ref's in the document by their expanded content + +## Merging several specifications + +Mixin several specifications merges all Swagger constructs, and warns about found conflicts. + +## Fixing a specification + +Unmarshalling a specification with golang json unmarshalling may lead to +some unwanted result on present but empty fields. + +## Analyzing a Swagger schema + +Swagger schemas are analyzed to determine their complexity and qualify their content. 
+*/
+package analysis
diff --git a/vendor/github.com/go-openapi/analysis/errors.go b/vendor/github.com/go-openapi/analysis/errors.go
new file mode 100644
index 00000000000..540e159a23c
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/errors.go
@@ -0,0 +1,56 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package analysis
+
+import (
+	"errors"
+	"fmt"
+)
+
+type analysisError string
+
+const (
+	ErrAnalysis analysisError = "analysis error"
+	ErrNoSchema analysisError = "no schema to analyze"
+)
+
+func (e analysisError) Error() string {
+	return string(e)
+}
+
+func ErrAtKey(key string, err error) error {
+	return errors.Join(
+		fmt.Errorf("key %s: %w", key, err),
+		ErrAnalysis,
+	)
+}
+
+func ErrInvalidRef(key string) error {
+	return fmt.Errorf("invalid reference: %q: %w", key, ErrAnalysis)
+}
+
+func ErrInvalidParameterRef(key string) error {
+	return fmt.Errorf("resolved reference is not a parameter: %q: %w", key, ErrAnalysis)
+}
+
+func ErrResolveSchema(err error) error {
+	return errors.Join(
+		fmt.Errorf("could not resolve schema: %w", err),
+		ErrAnalysis,
+	)
+}
+
+func ErrRewriteRef(key string, target any, err error) error {
+	return errors.Join(
+		fmt.Errorf("failed to rewrite ref for key %q at %v: %w", key, target, err),
+		ErrAnalysis,
+	)
+}
+
+func ErrInlineDefinition(key string, err error) error {
+	return errors.Join(
+		fmt.Errorf("error while creating definition %q from inline schema: %w", key, err),
+		ErrAnalysis,
+	)
+}
diff --git a/vendor/github.com/go-openapi/analysis/fixer.go b/vendor/github.com/go-openapi/analysis/fixer.go
new file mode 100644
index 00000000000..74becbbe496
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/fixer.go
@@ -0,0 +1,68 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package analysis
+
+import "github.com/go-openapi/spec"
+
+// FixEmptyResponseDescriptions replaces empty ("") response
+// descriptions in the input with "(empty)" to ensure that the
+// resulting Swagger stays valid. The problem appears to arise
+// from reading in valid specs that have an explicit response
+// description of "" (valid, response.description is required), but
+// due to zero values being omitted upon re-serialization (omitempty),
+// we lose them unless we put some characters in there.
+func FixEmptyResponseDescriptions(s *spec.Swagger) {
+	for k, v := range s.Responses {
+		FixEmptyDesc(&v) //#nosec
+		s.Responses[k] = v
+	}
+
+	if s.Paths == nil {
+		return
+	}
+
+	for _, v := range s.Paths.Paths {
+		if v.Get != nil {
+			FixEmptyDescs(v.Get.Responses)
+		}
+		if v.Put != nil {
+			FixEmptyDescs(v.Put.Responses)
+		}
+		if v.Post != nil {
+			FixEmptyDescs(v.Post.Responses)
+		}
+		if v.Delete != nil {
+			FixEmptyDescs(v.Delete.Responses)
+		}
+		if v.Options != nil {
+			FixEmptyDescs(v.Options.Responses)
+		}
+		if v.Head != nil {
+			FixEmptyDescs(v.Head.Responses)
+		}
+		if v.Patch != nil {
+			FixEmptyDescs(v.Patch.Responses)
+		}
+	}
+}
+
+// FixEmptyDescs adds "(empty)" as the description for any Response in
+// the given Responses object that doesn't already have one.
+func FixEmptyDescs(rs *spec.Responses) {
+	FixEmptyDesc(rs.Default)
+	for k, v := range rs.StatusCodeResponses {
+		FixEmptyDesc(&v) //#nosec
+		rs.StatusCodeResponses[k] = v
+	}
+}
+
+// FixEmptyDesc adds "(empty)" as the description to the given
+// Response object if it doesn't already have one and isn't a
+// ref. No-op on nil input.
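+//
+// Sketch of the effect (illustrative only):
+//
+//	rs := &spec.Response{} // description is ""
+//	FixEmptyDesc(rs)
+//	// rs.Description is now "(empty)"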
+func FixEmptyDesc(rs *spec.Response) { + if rs == nil || rs.Description != "" || rs.Ref.GetURL() != nil { + return + } + rs.Description = "(empty)" +} diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go new file mode 100644 index 00000000000..1c7a49c034f --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/flatten.go @@ -0,0 +1,796 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package analysis + +import ( + "log" + "path" + "slices" + "sort" + "strings" + + "github.com/go-openapi/analysis/internal/flatten/normalize" + "github.com/go-openapi/analysis/internal/flatten/operations" + "github.com/go-openapi/analysis/internal/flatten/replace" + "github.com/go-openapi/analysis/internal/flatten/schutils" + "github.com/go-openapi/analysis/internal/flatten/sortref" + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" +) + +const definitionsPath = "#/definitions" + +// newRef stores information about refs created during the flattening process +type newRef struct { + key string + newName string + path string + isOAIGen bool + resolved bool + schema *spec.Schema + parents []string +} + +// context stores intermediary results from flatten +type context struct { + newRefs map[string]*newRef + warnings []string + resolved map[string]string +} + +func newContext() *context { + return &context{ + newRefs: make(map[string]*newRef, allocMediumMap), + warnings: make([]string, 0), + resolved: make(map[string]string, allocMediumMap), + } +} + +// Flatten an analyzed spec and produce a self-contained spec bundle. +// +// There is a minimal and a full flattening mode. +// +// Minimally flattening a spec means: +// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left +// unscathed) +// - Importing external (http, file) references so they become internal to the document +// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers +// like "$ref": "#/definitions/myObject/allOfs/1") +// +// A minimally flattened spec thus guarantees the following properties: +// - all $refs point to a local definition (i.e. '#/definitions/...') +// - definitions are unique +// +// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they +// represent a complex schema or express commonality in the spec. +// Otherwise, they are simply expanded. +// Self-referencing JSON pointers cannot resolve to a type and trigger an error. +// +// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger. +// +// Fully flattening a spec means: +// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. +// +// By complex, we mean every JSON object with some properties. +// Arrays, when they do not define a tuple, +// or empty objects with or without additionalProperties, are not considered complex and remain inline. +// +// NOTE: rewritten schemas get a vendor extension x-go-gen-location so we know from which part of the spec definitions +// have been created. 
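+//
+// For instance (sketch; the exact generated name depends on operation ids and
+// naming rules): an inline body schema found at
+// "#/paths/~1pets/post/parameters/0/schema" would be moved to a definition
+// such as "#/definitions/postPetsParamsBody", and the original location
+// rewritten as a $ref to it.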
+//
+// Available flattening options:
+//   - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched
+//   - Expand: expand all $ref's in the document (inoperative if Minimal is set to true)
+//   - Verbose: croaks about detected name conflicts
+//   - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening
+//
+// NOTE: expansion removes all $ref save circular $ref, which remain in place
+//
+// TODO: additional options
+//   - PropagateNameExtensions: ensure that created entries properly follow naming rules when their parent has set an
+//     x-go-name extension
+//   - LiftAllOfs:
+//   - limit the flattening of allOf members when they are simple objects
+//   - merge allOf with validation only
+//   - merge allOf with extensions only
+//   - ...
+func Flatten(opts FlattenOpts) error {
+	debugLog("FlattenOpts: %#v", opts)
+
+	opts.flattenContext = newContext()
+
+	// 1. Recursively expand responses, parameters, path items and items in simple schemas.
+	//
+	// This simplifies the spec and leaves only the $ref's in schema objects.
+	if err := expand(&opts); err != nil {
+		return err
+	}
+
+	// 2. Strip the current document from absolute $ref's that are actually in the root,
+	// so we can recognize them as proper definitions
+	//
+	// In particular, this works around issue go-openapi/spec#76: leading absolute file in $ref is stripped
+	if err := normalizeRef(&opts); err != nil {
+		return err
+	}
+
+	// 3. Optionally remove shared parameters and responses already expanded (now unused).
+	//
+	// Operation parameters (i.e. under paths) remain.
+	if opts.RemoveUnused {
+		removeUnusedShared(&opts)
+	}
+
+	// 4. Import all remote references.
+	if err := importReferences(&opts); err != nil {
+		return err
+	}
+
+	// 5. Full flattening: rewrite inline schemas (schemas that aren't simple types or arrays or maps)
+	if !opts.Minimal && !opts.Expand {
+		if err := nameInlinedSchemas(&opts); err != nil {
+			return err
+		}
+	}
+
+	// 6. Rewrite JSON pointers other than $ref to named definitions
+	// and attempt to resolve conflicting names whenever possible.
+	if err := stripPointersAndOAIGen(&opts); err != nil {
+		return err
+	}
+
+	// 7. Strip the spec from unused definitions
+	if opts.RemoveUnused {
+		removeUnused(&opts)
+	}
+
+	// 8. Issue warning notifications, if any
+	opts.croak()
+
+	// TODO: simplify known schema patterns to flat objects with properties
+	// examples:
+	// - lift simple allOf object,
+	// - empty allOf with validation only or extensions only
+	// - rework allOf arrays
+	// - rework allOf additionalProperties
+
+	return nil
+}
+
+func expand(opts *FlattenOpts) error {
+	if err := spec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(!opts.Expand)); err != nil {
+		return err
+	}
+
+	opts.Spec.reload() // re-analyze
+
+	return nil
+}
+
+// normalizeRef strips the current file from any absolute file $ref.
This works around issue go-openapi/spec#76: +// leading absolute file in $ref is stripped +func normalizeRef(opts *FlattenOpts) error { + debugLog("normalizeRef") + + altered := false + for k, w := range opts.Spec.references.allRefs { + if !strings.HasPrefix(w.String(), opts.BasePath+definitionsPath) { // may be a mix of / and \, depending on OS + continue + } + + altered = true + debugLog("stripping absolute path for: %s", w.String()) + + // strip the base path from definition + if err := replace.UpdateRef(opts.Swagger(), k, + spec.MustCreateRef(path.Join(definitionsPath, path.Base(w.String())))); err != nil { + return err + } + } + + if altered { + opts.Spec.reload() // re-analyze + } + + return nil +} + +func removeUnusedShared(opts *FlattenOpts) { + opts.Swagger().Parameters = nil + opts.Swagger().Responses = nil + + opts.Spec.reload() // re-analyze +} + +func importReferences(opts *FlattenOpts) error { + var ( + imported bool + err error + ) + + for !imported && err == nil { + // iteratively import remote references until none left. + // This inlining deals with name conflicts by introducing auto-generated names ("OAIGen") + imported, err = importExternalReferences(opts) + + opts.Spec.reload() // re-analyze + } + + return err +} + +// nameInlinedSchemas replaces every complex inline construct by a named definition. +func nameInlinedSchemas(opts *FlattenOpts) error { + debugLog("nameInlinedSchemas") + + namer := &InlineSchemaNamer{ + Spec: opts.Swagger(), + Operations: operations.AllOpRefsByRef(opts.Spec, nil), + flattenContext: opts.flattenContext, + opts: opts, + } + + depthFirst := sortref.DepthFirst(opts.Spec.allSchemas) + for _, key := range depthFirst { + sch := opts.Spec.allSchemas[key] + if sch.Schema == nil || sch.Schema.Ref.String() != "" || sch.TopLevel { + continue + } + + asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if err != nil { + return ErrAtKey(key, err) + } + + if asch.isAnalyzedAsComplex() { // move complex schemas to definitions + if err := namer.Name(key, sch.Schema, asch); err != nil { + return err + } + } + } + + opts.Spec.reload() // re-analyze + + return nil +} + +func removeUnused(opts *FlattenOpts) { + for removeUnusedSinglePass(opts) { + // continue until no unused definition remains + } +} + +func removeUnusedSinglePass(opts *FlattenOpts) (hasRemoved bool) { + expected := make(map[string]struct{}) + for k := range opts.Swagger().Definitions { + expected[path.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{} + } + + for _, k := range opts.Spec.AllDefinitionReferences() { + delete(expected, k) + } + + for k := range expected { + hasRemoved = true + debugLog("removing unused definition %s", path.Base(k)) + if opts.Verbose { + log.Printf("info: removing unused definition: %s", path.Base(k)) + } + delete(opts.Swagger().Definitions, path.Base(k)) + } + + opts.Spec.reload() // re-analyze + + return hasRemoved +} + +func importKnownRef(entry sortref.RefRevIdx, refStr, newName string, opts *FlattenOpts) error { + // rewrite ref with already resolved external ref (useful for cyclical refs): + // rewrite external refs to local ones + debugLog("resolving known ref [%s] to %s", refStr, newName) + + for _, key := range entry.Keys { + if err := replace.UpdateRef(opts.Swagger(), key, spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return err + } + } + + return nil +} + +func importNewRef(entry sortref.RefRevIdx, refStr string, opts *FlattenOpts) error { + var ( + isOAIGen bool + 
newName string + ) + + debugLog("resolving schema from remote $ref [%s]", refStr) + + sch, err := spec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false)) + if err != nil { + return ErrResolveSchema(err) + } + + // at this stage only $ref analysis matters + partialAnalyzer := &Spec{ + references: referenceAnalysis{}, + patterns: patternAnalysis{}, + enums: enumAnalysis{}, + } + partialAnalyzer.reset() + partialAnalyzer.analyzeSchema("", sch, "/") + + // now rewrite those refs with rebase + for key, ref := range partialAnalyzer.references.allRefs { + if err := replace.UpdateRef(sch, key, spec.MustCreateRef(normalize.RebaseRef(entry.Ref.String(), ref.String()))); err != nil { + return ErrRewriteRef(key, entry.Ref.String(), err) + } + } + + // generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name + newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref, opts)) + debugLog("new name for [%s]: %s - with name conflict:%t", strings.Join(entry.Keys, ", "), newName, isOAIGen) + + opts.flattenContext.resolved[refStr] = newName + + // rewrite the external refs to local ones + for _, key := range entry.Keys { + if err := replace.UpdateRef(opts.Swagger(), key, + spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return err + } + + // keep track of created refs + resolved := false + if _, ok := opts.flattenContext.newRefs[key]; ok { + resolved = opts.flattenContext.newRefs[key].resolved + } + + debugLog("keeping track of ref: %s (%s), resolved: %t", key, newName, resolved) + opts.flattenContext.newRefs[key] = &newRef{ + key: key, + newName: newName, + path: path.Join(definitionsPath, newName), + isOAIGen: isOAIGen, + resolved: resolved, + schema: sch, + } + } + + // add the resolved schema to the definitions + schutils.Save(opts.Swagger(), newName, sch) + + return nil +} + +// importExternalReferences iteratively digs remote references and imports them into the main schema. +// +// At every iteration, new remotes may be found when digging deeper: they are rebased to the current schema before being imported. +// +// This returns true when no more remote references can be found. 
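+//
+// Sketch (illustrative): a remote "$ref": "schemas/pet.yaml#/Pet" is resolved,
+// the relative $ref's it contains are rebased against "schemas/pet.yaml", and
+// the schema is imported as a local definition such as "#/definitions/pet"
+// (or with an "OAIGen" suffix if that name is already taken).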
+func importExternalReferences(opts *FlattenOpts) (bool, error) {
+	debugLog("importExternalReferences")
+
+	groupedRefs := sortref.ReverseIndex(opts.Spec.references.schemas, opts.BasePath)
+	sortedRefStr := make([]string, 0, len(groupedRefs))
+	if opts.flattenContext == nil {
+		opts.flattenContext = newContext()
+	}
+
+	// sort $ref resolution to ensure deterministic name conflict resolution
+	for refStr := range groupedRefs {
+		sortedRefStr = append(sortedRefStr, refStr)
+	}
+	sort.Strings(sortedRefStr)
+
+	complete := true
+
+	for _, refStr := range sortedRefStr {
+		entry := groupedRefs[refStr]
+		if entry.Ref.HasFragmentOnly {
+			continue
+		}
+
+		complete = false
+
+		newName := opts.flattenContext.resolved[refStr]
+		if newName != "" {
+			if err := importKnownRef(entry, refStr, newName, opts); err != nil {
+				return false, err
+			}
+
+			continue
+		}
+
+		// resolve schemas
+		if err := importNewRef(entry, refStr, opts); err != nil {
+			return false, err
+		}
+	}
+
+	// maintain ref index entries
+	for k := range opts.flattenContext.newRefs {
+		r := opts.flattenContext.newRefs[k]
+
+		// update tracking with resolved schemas
+		if r.schema.Ref.String() != "" {
+			ref := spec.MustCreateRef(r.path)
+			sch, err := spec.ResolveRefWithBase(opts.Swagger(), &ref, opts.ExpandOpts(false))
+			if err != nil {
+				return false, ErrResolveSchema(err)
+			}
+
+			r.schema = sch
+		}
+
+		if r.path == k {
+			continue
+		}
+
+		// update tracking with renamed keys: got a cascade of refs
+		renamed := *r
+		renamed.key = r.path
+		opts.flattenContext.newRefs[renamed.path] = &renamed
+
+		// indirect ref
+		r.newName = path.Base(k)
+		r.schema = spec.RefSchema(r.path)
+		r.path = k
+		r.isOAIGen = strings.Contains(k, "OAIGen")
+	}
+
+	return complete, nil
+}
+
+// stripPointersAndOAIGen removes anonymous JSON pointers from the spec and chains with the name conflicts handler.
+// This loops until the spec has no such pointer and all name conflicts have been reduced as much as possible.
+func stripPointersAndOAIGen(opts *FlattenOpts) error {
+	// name all JSON pointers to anonymous documents
+	if err := namePointers(opts); err != nil {
+		return err
+	}
+
+	// remove unnecessary OAIGen ref (created when flattening external refs creates name conflicts)
+	hasIntroducedPointerOrInline, ers := stripOAIGen(opts)
+	if ers != nil {
+		return ers
+	}
+
+	// iterate as pointer or OAIGen resolution may introduce inline schemas or pointers
+	for hasIntroducedPointerOrInline {
+		if !opts.Minimal {
+			opts.Spec.reload() // re-analyze
+			if err := nameInlinedSchemas(opts); err != nil {
+				return err
+			}
+		}
+
+		if err := namePointers(opts); err != nil {
+			return err
+		}
+
+		// restrip and re-analyze
+		var err error
+		if hasIntroducedPointerOrInline, err = stripOAIGen(opts); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions.
+//
+// A dedupe is deemed unnecessary whenever:
+//   - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining)
+//   - there is a conflict with multiple parents: merge OAIGen into the first parent, then rewrite the other parents
+//     to point to the first parent.
+//
+// This function returns true whenever it re-inlined a complex schema, so the caller may choose to iterate
+// pointer and name resolution again.
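+//
+// Example (sketch): if flattening produced "#/definitions/thingOAIGen" only to
+// dedupe against an existing "#/definitions/thing", and a single parent refers
+// to it, the OAIGen schema is merged back into that parent and its definition
+// is deleted.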
+func stripOAIGen(opts *FlattenOpts) (bool, error) {
+	debugLog("stripOAIGen")
+	replacedWithComplex := false
+
+	// figure out referrers of OAIGen definitions (doing it before the refs start mutating)
+	for _, r := range opts.flattenContext.newRefs {
+		updateRefParents(opts.Spec.references.allRefs, r)
+	}
+
+	for k := range opts.flattenContext.newRefs {
+		r := opts.flattenContext.newRefs[k]
+		debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path: %s, #parents: %d, parents: %v, ref: %s",
+			k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String())
+
+		if !r.isOAIGen || len(r.parents) == 0 {
+			continue
+		}
+
+		hasReplacedWithComplex, err := stripOAIGenForRef(opts, k, r)
+		if err != nil {
+			return replacedWithComplex, err
+		}
+
+		replacedWithComplex = replacedWithComplex || hasReplacedWithComplex
+	}
+
+	debugLog("replacedWithComplex: %t", replacedWithComplex)
+	opts.Spec.reload() // re-analyze
+
+	return replacedWithComplex, nil
+}
+
+// updateRefParents updates all parents of an updated $ref
+func updateRefParents(allRefs map[string]spec.Ref, r *newRef) {
+	if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping)
+		return
+	}
+	for k, v := range allRefs {
+		if r.path != v.String() {
+			continue
+		}
+
+		if !slices.Contains(r.parents, k) {
+			r.parents = append(r.parents, k)
+		}
+	}
+}
+
+func stripOAIGenForRef(opts *FlattenOpts, k string, r *newRef) (bool, error) {
+	replacedWithComplex := false
+
+	pr := sortref.TopmostFirst(r.parents)
+
+	// rewrite first parent schema in hierarchical then lexicographical order
+	debugLog("rewrite first parent %s with schema", pr[0])
+	if err := replace.UpdateRefWithSchema(opts.Swagger(), pr[0], r.schema); err != nil {
+		return false, err
+	}
+
+	if pa, ok := opts.flattenContext.newRefs[pr[0]]; ok && pa.isOAIGen {
+		// update parent in ref index entry
+		debugLog("update parent entry: %s", pr[0])
+		pa.schema = r.schema
+		pa.resolved = false
+		replacedWithComplex = true
+	}
+
+	// rewrite other parents to point to first parent
+	if len(pr) > 1 {
+		for _, p := range pr[1:] {
+			replacingRef := spec.MustCreateRef(pr[0])
+
+			// set complex when replacing ref is an anonymous jsonpointer: further processing may be required
+			replacedWithComplex = replacedWithComplex || path.Dir(replacingRef.String()) != definitionsPath
+			debugLog("rewrite parent with ref: %s", replacingRef.String())
+
+			// NOTE: it is possible at this stage to introduce json pointers (to non-definitions places).
+			// Those are stripped later on.
+			if err := replace.UpdateRef(opts.Swagger(), p, replacingRef); err != nil {
+				return false, err
+			}
+
+			if pa, ok := opts.flattenContext.newRefs[p]; ok && pa.isOAIGen {
+				// update parent in ref index
+				debugLog("update parent entry: %s", p)
+				pa.schema = r.schema
+				pa.resolved = false
+				replacedWithComplex = true
+			}
+		}
+	}
+
+	// remove OAIGen definition
+	debugLog("removing definition %s", path.Base(r.path))
+	delete(opts.Swagger().Definitions, path.Base(r.path))
+
+	// propagate changes in ref index for keys which have this one as a parent
+	for kk, value := range opts.flattenContext.newRefs {
+		if kk == k || !value.isOAIGen || value.resolved {
+			continue
+		}
+
+		found := false
+		newParents := make([]string, 0, len(value.parents))
+		for _, parent := range value.parents {
+			switch {
+			case parent == r.path:
+				found = true
+				parent = pr[0]
+			case strings.HasPrefix(parent, r.path+"/"):
+				found = true
+				parent = path.Join(pr[0], strings.TrimPrefix(parent, r.path))
+			}
+
+			newParents = append(newParents, parent)
+		}
+
+		if found {
+			value.parents = newParents
+		}
+	}
+
+	// mark naming conflict as resolved
+	debugLog("marking naming conflict resolved for key: %s", r.key)
+	opts.flattenContext.newRefs[r.key].isOAIGen = false
+	opts.flattenContext.newRefs[r.key].resolved = true
+
+	// determine if the previous substitution did inline a complex schema
+	if r.schema != nil && r.schema.Ref.String() == "" { // inline schema
+		asch, err := Schema(SchemaOpts{Schema: r.schema, Root: opts.Swagger(), BasePath: opts.BasePath})
+		if err != nil {
+			return false, err
+		}
+
+		debugLog("re-inlined schema: parent: %s, %t", pr[0], asch.isAnalyzedAsComplex())
+		replacedWithComplex = replacedWithComplex || path.Dir(pr[0]) != definitionsPath && asch.isAnalyzedAsComplex()
+	}
+
+	return replacedWithComplex, nil
+}
+
+// namePointers replaces all JSON pointers to anonymous documents by a $ref to new named definitions.
+//
+// This is carried out depth-first. Pointers to $refs which are top level definitions are replaced by the $ref itself.
+// Pointers to simple types are expanded, unless they express commonality (i.e. several such $ref are used).
+func namePointers(opts *FlattenOpts) error {
+	debugLog("name pointers")
+
+	refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas))
+	for k, ref := range opts.Spec.references.allRefs {
+		debugLog("name pointers: %q => %#v", k, ref)
+		if path.Dir(ref.String()) == definitionsPath {
+			// this is a ref to a top-level definition: ok
+			continue
+		}
+
+		result, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), ref)
+		if err != nil {
+			return ErrAtKey(k, err)
+		}
+
+		replacingRef := result.Ref
+		sch := result.Schema
+		if opts.flattenContext != nil {
+			opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...)
+ } + + debugLog("planning pointer to replace at %s: %s, resolved to: %s", k, ref.String(), replacingRef.String()) + refsToReplace[k] = SchemaRef{ + Name: k, // caller + Ref: replacingRef, // called + Schema: sch, + TopLevel: path.Dir(replacingRef.String()) == definitionsPath, + } + } + + depthFirst := sortref.DepthFirst(refsToReplace) + namer := &InlineSchemaNamer{ + Spec: opts.Swagger(), + Operations: operations.AllOpRefsByRef(opts.Spec, nil), + flattenContext: opts.flattenContext, + opts: opts, + } + + for _, key := range depthFirst { + v := refsToReplace[key] + // update current replacement, which may have been updated by previous changes of deeper elements + result, erd := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), v.Ref) + if erd != nil { + return ErrAtKey(key, erd) + } + + if opts.flattenContext != nil { + opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...) + } + + v.Ref = result.Ref + v.Schema = result.Schema + v.TopLevel = path.Dir(result.Ref.String()) == definitionsPath + debugLog("replacing pointer at %s: resolved to: %s", key, v.Ref.String()) + + if v.TopLevel { + debugLog("replace pointer %s by canonical definition: %s", key, v.Ref.String()) + + // if the schema is a $ref to a top level definition, just rewrite the pointer to this $ref + if err := replace.UpdateRef(opts.Swagger(), key, v.Ref); err != nil { + return err + } + + continue + } + + if err := flattenAnonPointer(key, v, refsToReplace, namer, opts); err != nil { + return err + } + } + + opts.Spec.reload() // re-analyze + + return nil +} + +func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]SchemaRef, namer *InlineSchemaNamer, opts *FlattenOpts) error { + // this is a JSON pointer to an anonymous document (internal or external): + // create a definition for this schema when: + // - it is a complex schema + // - or it is pointed by more than one $ref (i.e. expresses commonality) + // otherwise, expand the pointer (single reference to a simple type) + // + // The named definition for this follows the target's key, not the caller's + debugLog("namePointers at %s for %s", key, v.Ref.String()) + + // qualify the expanded schema + asch, ers := Schema(SchemaOpts{Schema: v.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if ers != nil { + return ErrAtKey(key, ers) + } + callers := make([]string, 0, allocMediumMap) + + debugLog("looking for callers") + + an := New(opts.Swagger()) + for k, w := range an.references.allRefs { + r, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), w) + if err != nil { + return ErrAtKey(key, err) + } + + if opts.flattenContext != nil { + opts.flattenContext.warnings = append(opts.flattenContext.warnings, r.Warnings...) 
+ } + + if r.Ref.String() == v.Ref.String() { + callers = append(callers, k) + } + } + + debugLog("callers for %s: %d", v.Ref.String(), len(callers)) + if len(callers) == 0 { + // has already been updated and resolved + return nil + } + + parts := sortref.KeyParts(v.Ref.String()) + debugLog("number of callers for %s: %d", v.Ref.String(), len(callers)) + + // identifying edge case when the namer did nothing because we point to a non-schema object + // no definition is created and we expand the $ref for all callers + debugLog("decide what to do with the schema pointed to: asch.IsSimpleSchema=%t, len(callers)=%d, parts.IsSharedParam=%t, parts.IsSharedResponse=%t", + asch.IsSimpleSchema, len(callers), parts.IsSharedParam(), parts.IsSharedResponse(), + ) + + if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() { + debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String()) + if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil { + return err + } + + // regular case: we named the $ref as a definition, and we move all callers to this new $ref + for _, caller := range callers { + if caller == key { + continue + } + + // move $ref for next to resolve + debugLog("identified caller of %s at [%s]", v.Ref.String(), caller) + c := refsToReplace[caller] + c.Ref = v.Ref + refsToReplace[caller] = c + } + + return nil + } + + // everything that is a simple schema and not factorizable is expanded + debugLog("expand JSON pointer for key=%s", key) + + if err := replace.UpdateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil { + return err + } + // NOTE: there is no other caller to update + + return nil +} diff --git a/vendor/github.com/go-openapi/analysis/flatten_name.go b/vendor/github.com/go-openapi/analysis/flatten_name.go new file mode 100644 index 00000000000..475b33c4136 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/flatten_name.go @@ -0,0 +1,317 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package analysis + +import ( + "fmt" + "path" + "sort" + "strings" + + "github.com/go-openapi/analysis/internal/flatten/operations" + "github.com/go-openapi/analysis/internal/flatten/replace" + "github.com/go-openapi/analysis/internal/flatten/schutils" + "github.com/go-openapi/analysis/internal/flatten/sortref" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag/mangling" +) + +// InlineSchemaNamer finds a new name for an inlined type +type InlineSchemaNamer struct { + Spec *spec.Swagger + Operations map[string]operations.OpRef + flattenContext *context + opts *FlattenOpts +} + +// Name yields a new name for the inline schema +func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *AnalyzedSchema) error { + debugLog("naming inlined schema at %s", key) + + parts := sortref.KeyParts(key) + for _, name := range namesFromKey(parts, aschema, isn.Operations) { + if name == "" { + continue + } + + // create unique name + mangle := mangler(isn.opts) + newName, isOAIGen := uniqifyName(isn.Spec.Definitions, mangle(name)) + + // clone schema + sch := schutils.Clone(schema) + + // replace values on schema + debugLog("rewriting schema to ref: key=%s with new name: %s", key, newName) + if err := replace.RewriteSchemaToRef(isn.Spec, key, + spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return ErrInlineDefinition(newName, err) + } + + // rewrite any dependent $ref pointing to this place, + // when not already 
pointing to a top-level definition.
+		//
+		// NOTE: this is important if such referrers use arbitrary JSON pointers.
+		an := New(isn.Spec)
+		for k, v := range an.references.allRefs {
+			r, erd := replace.DeepestRef(isn.opts.Swagger(), isn.opts.ExpandOpts(false), v)
+			if erd != nil {
+				return ErrAtKey(k, erd)
+			}
+
+			if isn.opts.flattenContext != nil {
+				isn.opts.flattenContext.warnings = append(isn.opts.flattenContext.warnings, r.Warnings...)
+			}
+
+			if r.Ref.String() != key && (r.Ref.String() != path.Join(definitionsPath, newName) || path.Dir(v.String()) == definitionsPath) {
+				continue
+			}
+
+			debugLog("found a $ref to a rewritten schema: %s points to %s", k, v.String())
+
+			// rewrite $ref to the new target
+			if err := replace.UpdateRef(isn.Spec, k,
+				spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil {
+				return err
+			}
+		}
+
+		// NOTE: this extension is currently not used by go-swagger (provided for information only)
+		sch.AddExtension("x-go-gen-location", GenLocation(parts))
+
+		// save cloned schema to definitions
+		schutils.Save(isn.Spec, newName, sch)
+
+		// keep track of created refs
+		if isn.flattenContext == nil {
+			continue
+		}
+
+		debugLog("track created ref: key=%s, newName=%s, isOAIGen=%t", key, newName, isOAIGen)
+		resolved := false
+
+		if _, ok := isn.flattenContext.newRefs[key]; ok {
+			resolved = isn.flattenContext.newRefs[key].resolved
+		}
+
+		isn.flattenContext.newRefs[key] = &newRef{
+			key:      key,
+			newName:  newName,
+			path:     path.Join(definitionsPath, newName),
+			isOAIGen: isOAIGen,
+			resolved: resolved,
+			schema:   sch,
+		}
+	}
+
+	return nil
+}
+
+// uniqifyName yields a unique name for a definition
+func uniqifyName(definitions spec.Definitions, name string) (string, bool) {
+	isOAIGen := false
+	if name == "" {
+		name = "oaiGen"
+		isOAIGen = true
+	}
+
+	if len(definitions) == 0 {
+		return name, isOAIGen
+	}
+
+	unq := true
+	for k := range definitions {
+		if strings.EqualFold(k, name) {
+			unq = false
+
+			break
+		}
+	}
+
+	if unq {
+		return name, isOAIGen
+	}
+
+	name += "OAIGen"
+	isOAIGen = true
+	var idx int
+	unique := name
+	_, known := definitions[unique]
+
+	for known {
+		idx++
+		unique = fmt.Sprintf("%s%d", name, idx)
+		_, known = definitions[unique]
+	}
+
+	return unique, isOAIGen
+}
+
+func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations map[string]operations.OpRef) []string {
+	var (
+		baseNames  [][]string
+		startIndex int
+	)
+
+	switch {
+	case parts.IsOperation():
+		baseNames, startIndex = namesForOperation(parts, operations)
+	case parts.IsDefinition():
+		baseNames, startIndex = namesForDefinition(parts)
+	default:
+		// this is a non-standard pointer: build a name by concatenating its parts
+		baseNames = [][]string{parts}
+		startIndex = len(baseNames) + 1
+	}
+
+	result := make([]string, 0, len(baseNames))
+	for _, segments := range baseNames {
+		nm := parts.BuildName(segments, startIndex, partAdder(aschema))
+		if nm == "" {
+			continue
+		}
+
+		result = append(result, nm)
+	}
+	sort.Strings(result)
+
+	debugLog("names from parts: %v => %v", parts, result)
+	return result
+}
+
+func namesForParam(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) {
+	var (
+		baseNames  [][]string
+		startIndex int
+	)
+
+	piref := parts.PathItemRef()
+	if piref.String() != "" && parts.IsOperationParam() {
+		if op, ok := operations[piref.String()]; ok {
+			startIndex = 5
+			baseNames = append(baseNames, []string{op.ID, "params", "body"})
+		}
+	} else if parts.IsSharedOperationParam() {
+		pref := parts.PathRef()
+		for k, v := range operations {
+			if strings.HasPrefix(k, pref.String()) {
+				startIndex = 4
+				baseNames = append(baseNames, []string{v.ID, "params", "body"})
+			}
+		}
+	}
+
+	return baseNames, startIndex
+}
+
+func namesForOperation(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) {
+	var (
+		baseNames  [][]string
+		startIndex int
+	)
+
+	// params
+	if parts.IsOperationParam() || parts.IsSharedOperationParam() {
+		baseNames, startIndex = namesForParam(parts, operations)
+	}
+
+	// responses
+	if parts.IsOperationResponse() {
+		piref := parts.PathItemRef()
+		if piref.String() != "" {
+			if op, ok := operations[piref.String()]; ok {
+				startIndex = 6
+				baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"})
+			}
+		}
+	}
+
+	return baseNames, startIndex
+}
+
+const (
+	minStartIndex = 2
+	minSegments   = 2
+)
+
+func namesForDefinition(parts sortref.SplitKey) ([][]string, int) {
+	nm := parts.DefinitionName()
+	if nm != "" {
+		return [][]string{{parts.DefinitionName()}}, minStartIndex
+	}
+
+	return [][]string{}, 0
+}
+
+// partAdder knows how to interpret a schema when it comes to building a name from parts
+func partAdder(aschema *AnalyzedSchema) sortref.PartAdder {
+	return func(part string) []string {
+		segments := make([]string, 0, minSegments)
+
+		if part == "items" || part == "additionalItems" {
+			if aschema.IsTuple || aschema.IsTupleWithExtra {
+				segments = append(segments, "tuple")
+			} else {
+				segments = append(segments, "items")
+			}
+
+			if part == "additionalItems" {
+				segments = append(segments, part)
+			}
+
+			return segments
+		}
+
+		segments = append(segments, part)
+
+		return segments
+	}
+}
+
+func mangler(o *FlattenOpts) func(string) string {
+	if o.KeepNames {
+		return func(in string) string { return in }
+	}
+	mangler := mangling.NewNameMangler()
+
+	return mangler.ToJSONName
+}
+
+func nameFromRef(ref spec.Ref, o *FlattenOpts) string {
+	mangle := mangler(o)
+
+	u := ref.GetURL()
+	if u.Fragment != "" {
+		return mangle(path.Base(u.Fragment))
+	}
+
+	if u.Path != "" {
+		bn := path.Base(u.Path)
+		if bn != "" && bn != "/" {
+			ext := path.Ext(bn)
+			if ext != "" {
+				return mangle(bn[:len(bn)-len(ext)])
+			}
+
+			return mangle(bn)
+		}
+	}
+
+	return mangle(strings.ReplaceAll(u.Host, ".", " "))
+}
+
+// GenLocation indicates from which section of the specification (models or operations) a definition has been created.
+//
+// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is provided
+// for information only.
+func GenLocation(parts sortref.SplitKey) string {
+	switch {
+	case parts.IsOperation():
+		return "operations"
+	case parts.IsDefinition():
+		return "models"
+	default:
+		return ""
+	}
+}
diff --git a/vendor/github.com/go-openapi/analysis/flatten_options.go b/vendor/github.com/go-openapi/analysis/flatten_options.go
new file mode 100644
index 00000000000..d8fc25cf586
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/flatten_options.go
@@ -0,0 +1,82 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package analysis
+
+import (
+	"log"
+
+	"github.com/go-openapi/spec"
+)
+
+// FlattenOpts holds the configuration for flattening a swagger specification.
+//
+// The BasePath parameter is used to locate remote relative $ref found in the specification.
+// This path is a file: it points to the location of the root document and may be either a local
+// file path or a URL.
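+//
+// For example (sketch): with BasePath set to "/work/specs/root.yaml", a
+// "$ref": "./common.yaml#/definitions/Error" found in the root document is
+// looked up under "/work/specs/".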
+// +// If none specified, relative references (e.g. "$ref": "folder/schema.yaml#/definitions/...") +// found in the spec are searched from the current working directory. +type FlattenOpts struct { + Spec *Spec // The analyzed spec to work with + flattenContext *context // Internal context to track flattening activity + + BasePath string // The location of the root document for this spec to resolve relative $ref + + // Flattening options + Expand bool // When true, skip flattening the spec and expand it instead (if Minimal is false) + Minimal bool // When true, do not decompose complex structures such as allOf + Verbose bool // enable some reporting on possible name conflicts detected + RemoveUnused bool // When true, remove unused parameters, responses and definitions after expansion/flattening + ContinueOnError bool // Continue when spec expansion issues are found + KeepNames bool // Do not attempt to jsonify names from references when flattening + + /* Extra keys */ + _ struct{} // require keys +} + +// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document. +func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *spec.ExpandOptions { + return &spec.ExpandOptions{ + RelativeBase: f.BasePath, + SkipSchemas: skipSchemas, + ContinueOnError: f.ContinueOnError, + } +} + +// Swagger gets the swagger specification for this flatten operation +func (f *FlattenOpts) Swagger() *spec.Swagger { + return f.Spec.spec +} + +// croak logs notifications and warnings about valid, but possibly unwanted constructs resulting +// from flattening a spec +func (f *FlattenOpts) croak() { + if !f.Verbose { + return + } + + reported := make(map[string]bool, len(f.flattenContext.newRefs)) + for _, v := range f.Spec.references.allRefs { + // warns about duplicate handling + for _, r := range f.flattenContext.newRefs { + if r.isOAIGen && r.path == v.String() { + reported[r.newName] = true + } + } + } + + for k := range reported { + log.Printf("warning: duplicate flattened definition name resolved as %s", k) + } + + // warns about possible type mismatches + uniqueMsg := make(map[string]bool) + for _, msg := range f.flattenContext.warnings { + if _, ok := uniqueMsg[msg]; ok { + continue + } + log.Printf("warning: %s", msg) + uniqueMsg[msg] = true + } +} diff --git a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go new file mode 100644 index 00000000000..03e0d32e9ea --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go @@ -0,0 +1,30 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package debug + +import ( + "fmt" + "log" + "os" + "path/filepath" + "runtime" +) + +var ( + output = os.Stdout +) + +// GetLogger provides a prefix debug logger +func GetLogger(prefix string, debug bool) func(string, ...any) { + if debug { + logger := log.New(output, prefix+":", log.LstdFlags) + + return func(msg string, args ...any) { + _, file1, pos1, _ := runtime.Caller(1) + logger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } + } + + return func(_ string, _ ...any) {} +} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go new file mode 100644 index 00000000000..320a50bff85 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go @@ -0,0 +1,90 @@ +// 
SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package normalize
+
+import (
+	"net/url"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/go-openapi/spec"
+)
+
+// RebaseRef rebases a remote ref relative to a base ref.
+//
+// NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here).
+//
+// NOTE(windows):
+// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec)
+// * "/" in paths may appear as escape sequences
+func RebaseRef(baseRef string, ref string) string {
+	baseRef, _ = url.PathUnescape(baseRef)
+	ref, _ = url.PathUnescape(ref)
+
+	if baseRef == "" || baseRef == "." || strings.HasPrefix(baseRef, "#") {
+		return ref
+	}
+
+	parts := strings.Split(ref, "#")
+
+	baseParts := strings.Split(baseRef, "#")
+	baseURL, _ := url.Parse(baseParts[0])
+	if strings.HasPrefix(ref, "#") {
+		// rebase a fragment-only $ref onto the base document
+		return strings.Join([]string{baseParts[0], parts[1]}, "#")
+	}
+
+	refURL, _ := url.Parse(parts[0])
+	if refURL.Host != "" || filepath.IsAbs(parts[0]) {
+		// not rebasing an absolute path
+		return ref
+	}
+
+	// there is a relative path
+	var basePath string
+	if baseURL.Host != "" {
+		// when there is a host, standard URI rules apply (with "/")
+		baseURL.Path = path.Dir(baseURL.Path)
+		baseURL.Path = path.Join(baseURL.Path, "/"+parts[0])
+
+		return baseURL.String()
+	}
+
+	// this is a local relative path
+	// basePart[0] and parts[0] are local filesystem directories/files
+	basePath = filepath.Dir(baseParts[0])
+	relPath := filepath.Join(basePath, string(filepath.Separator)+parts[0])
+	if len(parts) > 1 {
+		return strings.Join([]string{relPath, parts[1]}, "#")
+	}
+
+	return relPath
+}
+
+// Path renders absolute path on remote file refs
+//
+// NOTE(windows):
+// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec)
+// * "/" in paths may appear as escape sequences
+func Path(ref spec.Ref, basePath string) string {
+	uri, _ := url.PathUnescape(ref.String())
+	if ref.HasFragmentOnly || filepath.IsAbs(uri) {
+		return uri
+	}
+
+	refURL, _ := url.Parse(uri)
+	if refURL.Host != "" {
+		return uri
+	}
+
+	parts := strings.Split(uri, "#")
+	// BasePath, parts[0] are local filesystem directories, guaranteed to be absolute at this stage
+	parts[0] = filepath.Join(filepath.Dir(basePath), parts[0])
+
+	return strings.Join(parts, "#")
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go
new file mode 100644
index 00000000000..940c46a9256
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go
@@ -0,0 +1,95 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package operations
+
+import (
+	"path"
+	"slices"
+	"sort"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/swag/mangling"
+)
+
+// AllOpRefsByRef returns an index of sortable operations
+func AllOpRefsByRef(specDoc Provider, operationIDs []string) map[string]OpRef {
+	return OpRefsByRef(GatherOperations(specDoc, operationIDs))
+}
+
+// OpRefsByRef indexes a map of sortable operations
+func OpRefsByRef(oprefs map[string]OpRef) map[string]OpRef {
+	result := make(map[string]OpRef, len(oprefs))
+	for 
_, v := range oprefs { + result[v.Ref.String()] = v + } + + return result +} + +// OpRef is an indexable, sortable operation +type OpRef struct { + Method string + Path string + Key string + ID string + Op *spec.Operation + Ref spec.Ref +} + +// OpRefs is a sortable collection of operations +type OpRefs []OpRef + +func (o OpRefs) Len() int { return len(o) } +func (o OpRefs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o OpRefs) Less(i, j int) bool { return o[i].Key < o[j].Key } + +// Provider knows how to collect operations from a spec +type Provider interface { + Operations() map[string]map[string]*spec.Operation +} + +// GatherOperations builds a map of sorted operations from a spec +func GatherOperations(specDoc Provider, operationIDs []string) map[string]OpRef { + var oprefs OpRefs + mangler := mangling.NewNameMangler() + + for method, pathItem := range specDoc.Operations() { + for pth, operation := range pathItem { + vv := *operation + oprefs = append(oprefs, OpRef{ + Key: mangler.ToGoName(strings.ToLower(method) + " " + pth), + Method: method, + Path: pth, + ID: vv.ID, + Op: &vv, + Ref: spec.MustCreateRef("#" + path.Join("/paths", jsonpointer.Escape(pth), method)), + }) + } + } + + sort.Sort(oprefs) + + operations := make(map[string]OpRef) + for _, opr := range oprefs { + nm := opr.ID + if nm == "" { + nm = opr.Key + } + + oo, found := operations[nm] + if found && oo.Method != opr.Method && oo.Path != opr.Path { + nm = opr.Key + } + + if len(operationIDs) == 0 || slices.Contains(operationIDs, opr.ID) || slices.Contains(operationIDs, nm) { + opr.ID = nm + opr.Op.ID = nm + operations[nm] = opr + } + } + + return operations +} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/errors.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/errors.go new file mode 100644 index 00000000000..d7c28b88571 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/errors.go @@ -0,0 +1,64 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package replace + +import ( + "errors" + "fmt" +) + +type replaceError string + +const ( + ErrReplace replaceError = "flatten replace error" + ErrUnexpectedType replaceError = "unexpected type used in getPointerFromKey" +) + +func (e replaceError) Error() string { + return string(e) +} + +func ErrNoSchemaWithRef(key string, value any) error { + return fmt.Errorf("no schema with ref found at %s for %T: %w", key, value, ErrReplace) +} + +func ErrNoSchema(key string) error { + return fmt.Errorf("no schema found at %s: %w", key, ErrReplace) +} + +func ErrNotANumber(key string, err error) error { + return errors.Join( + ErrReplace, + fmt.Errorf("%s not a number: %w", key, err), + ) +} + +func ErrUnhandledParentRewrite(key string, value any) error { + return fmt.Errorf("unhandled parent schema rewrite %s: %T: %w", key, value, ErrReplace) +} + +func ErrUnhandledParentType(key string, value any) error { + return fmt.Errorf("unhandled type for parent of %s: %T: %w", key, value, ErrReplace) +} + +func ErrNoParent(key string, err error) error { + return errors.Join( + fmt.Errorf("can't get parent for %s: %w", key, err), + ErrReplace, + ) +} + +func ErrUnhandledContainerType(key string, value any) error { + return fmt.Errorf("unhandled container type at %s: %T: %w", key, value, ErrReplace) +} + +func ErrCyclicChain(key string) error { + return fmt.Errorf("cannot resolve cyclic chain of pointers under %s: %w", key, ErrReplace) +} + +func 
ErrInvalidPointerType(key string, value any, err error) error {
+	return fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T (%v): %w",
+		key, value, err, ErrReplace,
+	)
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
new file mode 100644
index 00000000000..61c13f7ebaa
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
@@ -0,0 +1,456 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package replace
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/url"
+	"os"
+	"path"
+	"strconv"
+
+	"github.com/go-openapi/analysis/internal/debug"
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/spec"
+)
+
+const (
+	definitionsPath = "#/definitions"
+	allocMediumMap  = 64
+)
+
+var debugLog = debug.GetLogger("analysis/flatten/replace", os.Getenv("SWAGGER_DEBUG") != "")
+
+// RewriteSchemaToRef replaces a schema with a Ref
+func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error {
+	debugLog("rewriting schema to ref for %s with %s", key, ref.String())
+	_, value, err := getPointerFromKey(sp, key)
+	if err != nil {
+		return err
+	}
+
+	switch refable := value.(type) {
+	case *spec.Schema:
+		return rewriteParentRef(sp, key, ref)
+
+	case spec.Schema:
+		return rewriteParentRef(sp, key, ref)
+
+	case *spec.SchemaOrArray:
+		if refable.Schema != nil {
+			refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+		}
+
+	case *spec.SchemaOrBool:
+		if refable.Schema != nil {
+			refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+		}
+	case map[string]any: // this happens e.g. 
if a schema points to an extension unmarshaled as map[string]interface{} + return rewriteParentRef(sp, key, ref) + default: + return ErrNoSchemaWithRef(key, value) + } + + return nil +} + +func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error { + parent, entry, pvalue, err := getParentFromKey(sp, key) + if err != nil { + return err + } + + debugLog("rewriting holder for %T", pvalue) + switch container := pvalue.(type) { + case spec.Response: + if err := rewriteParentRef(sp, "#"+parent, ref); err != nil { + return err + } + + case *spec.Response: + container.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case *spec.Responses: + statusCode, err := strconv.Atoi(entry) + if err != nil { + return ErrNotANumber(key[1:], err) + } + resp := container.StatusCodeResponses[statusCode] + resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container.StatusCodeResponses[statusCode] = resp + + case map[string]spec.Response: + resp := container[entry] + resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container[entry] = resp + + case spec.Parameter: + if err := rewriteParentRef(sp, "#"+parent, ref); err != nil { + return err + } + + case map[string]spec.Parameter: + param := container[entry] + param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container[entry] = param + + case []spec.Parameter: + idx, err := strconv.Atoi(entry) + if err != nil { + return ErrNotANumber(key[1:], err) + } + param := container[idx] + param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + container[idx] = param + + case spec.Definitions: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case map[string]spec.Schema: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case []spec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return ErrNotANumber(key[1:], err) + } + container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case *spec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return ErrNotANumber(key[1:], err) + } + container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case spec.SchemaProperties: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case *any: + *container = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema + + default: + return ErrUnhandledParentRewrite(key, pvalue) + } + + return nil +} + +// getPointerFromKey retrieves the content of the JSON pointer "key" +func getPointerFromKey(sp any, key string) (string, any, error) { + switch sp.(type) { + case *spec.Schema: + case *spec.Swagger: + default: + panic(ErrUnexpectedType) + } + if key == "#/" { + return "", sp, nil + } + // unescape chars in key, e.g. 
"{}" from path params + pth, _ := url.PathUnescape(key[1:]) + ptr, err := jsonpointer.New(pth) + if err != nil { + return "", nil, errors.Join(err, ErrReplace) + } + + value, _, err := ptr.Get(sp) + if err != nil { + debugLog("error when getting key: %s with path: %s", key, pth) + + return "", nil, errors.Join(err, ErrReplace) + } + + return pth, value, nil +} + +// getParentFromKey retrieves the container of the JSON pointer "key" +func getParentFromKey(sp any, key string) (string, string, any, error) { + switch sp.(type) { + case *spec.Schema: + case *spec.Swagger: + default: + panic(ErrUnexpectedType) + } + // unescape chars in key, e.g. "{}" from path params + pth, _ := url.PathUnescape(key[1:]) + + parent, entry := path.Dir(pth), path.Base(pth) + debugLog("getting schema holder at: %s, with entry: %s", parent, entry) + + pptr, err := jsonpointer.New(parent) + if err != nil { + return "", "", nil, errors.Join(err, ErrReplace) + } + pvalue, _, err := pptr.Get(sp) + if err != nil { + return "", "", nil, ErrNoParent(parent, err) + } + + return parent, entry, pvalue, nil +} + +// UpdateRef replaces a ref by another one +func UpdateRef(sp any, key string, ref spec.Ref) error { + switch sp.(type) { + case *spec.Schema: + case *spec.Swagger: + default: + panic(ErrUnexpectedType) + } + debugLog("updating ref for %s with %s", key, ref.String()) + pth, value, err := getPointerFromKey(sp, key) + if err != nil { + return err + } + + switch refable := value.(type) { + case *spec.Schema: + refable.Ref = ref + case *spec.SchemaOrArray: + if refable.Schema != nil { + refable.Schema.Ref = ref + } + case *spec.SchemaOrBool: + if refable.Schema != nil { + refable.Schema.Ref = ref + } + case spec.Schema: + debugLog("rewriting holder for %T", refable) + _, entry, pvalue, erp := getParentFromKey(sp, key) + if erp != nil { + return erp + } + switch container := pvalue.(type) { + case spec.Definitions: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case map[string]spec.Schema: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case []spec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return ErrNotANumber(pth, err) + } + container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case *spec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return ErrNotANumber(pth, err) + } + container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + case spec.SchemaProperties: + container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema + + default: + return ErrUnhandledContainerType(key, value) + } + + default: + return ErrNoSchemaWithRef(key, value) + } + + return nil +} + +// UpdateRefWithSchema replaces a ref with a schema (i.e. 
re-inline schema) +func UpdateRefWithSchema(sp *spec.Swagger, key string, sch *spec.Schema) error { + debugLog("updating ref for %s with schema", key) + pth, value, err := getPointerFromKey(sp, key) + if err != nil { + return err + } + + switch refable := value.(type) { + case *spec.Schema: + *refable = *sch + case spec.Schema: + _, entry, pvalue, erp := getParentFromKey(sp, key) + if erp != nil { + return erp + } + + switch container := pvalue.(type) { + case spec.Definitions: + container[entry] = *sch + + case map[string]spec.Schema: + container[entry] = *sch + + case []spec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return ErrNotANumber(pth, err) + } + container[idx] = *sch + + case *spec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return ErrNotANumber(pth, err) + } + container.Schemas[idx] = *sch + + case spec.SchemaProperties: + container[entry] = *sch + + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema + + default: + return ErrUnhandledParentType(key, value) + } + case *spec.SchemaOrArray: + *refable.Schema = *sch + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema + case *spec.SchemaOrBool: + *refable.Schema = *sch + default: + return ErrNoSchemaWithRef(key, value) + } + + return nil +} + +// DeepestRefResult holds the results from DeepestRef analysis +type DeepestRefResult struct { + Ref spec.Ref + Schema *spec.Schema + Warnings []string +} + +// DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions. +// - if no definition is found, returns the deepest ref. +// - pointers to external files are expanded +// +// NOTE: all external $ref's are assumed to be already expanded at this stage. +func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) { + if !ref.HasFragmentOnly { + // we found an external $ref, which is odd at this stage: + // do nothing on external $refs + return &DeepestRefResult{Ref: ref}, nil + } + + currentRef := ref + visited := make(map[string]bool, allocMediumMap) + warnings := make([]string, 0) + +DOWNREF: + for currentRef.String() != "" { + if path.Dir(currentRef.String()) == definitionsPath { + // this is a top-level definition: stop here and return this ref + return &DeepestRefResult{Ref: currentRef}, nil + } + + if _, beenThere := visited[currentRef.String()]; beenThere { + return nil, ErrCyclicChain(currentRef.String()) + } + + visited[currentRef.String()] = true + value, _, err := currentRef.GetPointer().Get(sp) + if err != nil { + return nil, err + } + + switch refable := value.(type) { + case *spec.Schema: + if refable.Ref.String() == "" { + break DOWNREF + } + currentRef = refable.Ref + + case spec.Schema: + if refable.Ref.String() == "" { + break DOWNREF + } + currentRef = refable.Ref + + case *spec.SchemaOrArray: + if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" { + break DOWNREF + } + currentRef = refable.Schema.Ref + + case *spec.SchemaOrBool: + if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" { + break DOWNREF + } + currentRef = refable.Schema.Ref + + case spec.Response: + // a pointer points to a schema initially marshalled in responses section... + // Attempt to convert this to a schema. 
If this fails, the spec is invalid + asJSON, _ := refable.MarshalJSON() + var asSchema spec.Schema + + err := asSchema.UnmarshalJSON(asJSON) + if err != nil { + return nil, ErrInvalidPointerType(currentRef.String(), value, err) + } + warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String())) + + if asSchema.Ref.String() == "" { + break DOWNREF + } + currentRef = asSchema.Ref + + case spec.Parameter: + // a pointer points to a schema initially marshalled in parameters section... + // Attempt to convert this to a schema. If this fails, the spec is invalid + asJSON, _ := refable.MarshalJSON() + var asSchema spec.Schema + if err := asSchema.UnmarshalJSON(asJSON); err != nil { + return nil, ErrInvalidPointerType(currentRef.String(), value, err) + } + + warnings = append(warnings, fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String())) + + if asSchema.Ref.String() == "" { + break DOWNREF + } + currentRef = asSchema.Ref + + default: + // fallback: attempts to resolve the pointer as a schema + if refable == nil { + break DOWNREF + } + + asJSON, _ := json.Marshal(refable) + var asSchema spec.Schema + if err := asSchema.UnmarshalJSON(asJSON); err != nil { + return nil, ErrInvalidPointerType(currentRef.String(), value, err) + } + warnings = append(warnings, fmt.Sprintf("found $ref %q (%T) interpreted as schema", currentRef.String(), refable)) + + if asSchema.Ref.String() == "" { + break DOWNREF + } + currentRef = asSchema.Ref + } + } + + // assess what schema we're ending with + sch, erv := spec.ResolveRefWithBase(sp, &currentRef, opts) + if erv != nil { + return nil, erv + } + + if sch == nil { + return nil, ErrNoSchema(currentRef.String()) + } + + return &DeepestRefResult{Ref: currentRef, Schema: sch, Warnings: warnings}, nil +} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go new file mode 100644 index 00000000000..7e9fb9f0a5f --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package schutils provides tools to save or clone a schema + when flattening a spec.
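+// +// A minimal usage sketch (illustrative only; "NamedThing" is a made-up definition name): +// +// clone := schutils.Clone(schema) // deep copy via a JSON round-trip +// schutils.Save(sp, "NamedThing", clone) // registers the clone under #/definitions/NamedThing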
+package schutils + +import ( + "github.com/go-openapi/spec" + "github.com/go-openapi/swag/jsonutils" +) + +const allocLargeMap = 150 + +// Save registers a schema as an entry in spec #/definitions +func Save(sp *spec.Swagger, name string, schema *spec.Schema) { + if schema == nil { + return + } + + if sp.Definitions == nil { + sp.Definitions = make(map[string]spec.Schema, allocLargeMap) + } + + sp.Definitions[name] = *schema +} + +// Clone deep-clones a schema +func Clone(schema *spec.Schema) *spec.Schema { + var sch spec.Schema + _ = jsonutils.FromDynamicJSON(schema, &sch) + + return &sch +} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go new file mode 100644 index 00000000000..a5db0249ecc --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go @@ -0,0 +1,205 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package sortref + +import ( + "net/http" + "path" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" +) + +const ( + paths = "paths" + responses = "responses" + parameters = "parameters" + definitions = "definitions" +) + +var ( + ignoredKeys map[string]struct{} + validMethods map[string]struct{} +) + +func init() { + ignoredKeys = map[string]struct{}{ + "schema": {}, + "properties": {}, + "not": {}, + "anyOf": {}, + "oneOf": {}, + } + + validMethods = map[string]struct{}{ + "GET": {}, + "HEAD": {}, + "OPTIONS": {}, + "PATCH": {}, + "POST": {}, + "PUT": {}, + "DELETE": {}, + } +} + +// Key represents a key item constructed from /-separated segments +type Key struct { + Segments int + Key string +} + +// Keys is a sortable collection of Keys +type Keys []Key + +func (k Keys) Len() int { return len(k) } +func (k Keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] } +func (k Keys) Less(i, j int) bool { + return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key) +} + +// KeyParts constructs a SplitKey with all its /-separated segments decomposed. It is sortable. +func KeyParts(key string) SplitKey { + var res []string + for part := range strings.SplitSeq(key[1:], "/") { + if part != "" { + res = append(res, jsonpointer.Unescape(part)) + } + } + + return res +} + +// SplitKey holds the parts of a /-separated key, so that their location may be determined. +type SplitKey []string + +// IsDefinition is true when the split key is in the #/definitions section of a spec +func (s SplitKey) IsDefinition() bool { + return len(s) > 1 && s[0] == definitions +} + +// DefinitionName yields the name of the definition +func (s SplitKey) DefinitionName() string { + if !s.IsDefinition() { + return "" + } + + return s[1] +} + +// PartAdder knows how to construct the components of a new name +type PartAdder func(string) []string + +// BuildName builds a name from segments +func (s SplitKey) BuildName(segments []string, startIndex int, adder PartAdder) string { + for i, part := range s[startIndex:] { + if _, ignored := ignoredKeys[part]; !ignored || s.isKeyName(startIndex+i) { + segments = append(segments, adder(part)...)
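+ // e.g. for a key like "/definitions/thing/properties/name", the "properties" + // segment is filtered out by ignoredKeys while "thing" and "name" contribute + // parts to the generated name (a sketch; the actual parts depend on the adder).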
+ } + } + + return strings.Join(segments, " ") +} + +// IsOperation is true when the split key is in the operations section +func (s SplitKey) IsOperation() bool { + return len(s) > 1 && s[0] == paths +} + +// IsSharedOperationParam is true when the split key is in the parameters section of a path +func (s SplitKey) IsSharedOperationParam() bool { + return len(s) > 2 && s[0] == paths && s[2] == parameters +} + +// IsSharedParam is true when the split key is in the #/parameters section of a spec +func (s SplitKey) IsSharedParam() bool { + return len(s) > 1 && s[0] == parameters +} + +// IsOperationParam is true when the split key is in the parameters section of an operation +func (s SplitKey) IsOperationParam() bool { + return len(s) > 3 && s[0] == paths && s[3] == parameters +} + +// IsOperationResponse is true when the split key is in the responses section of an operation +func (s SplitKey) IsOperationResponse() bool { + return len(s) > 3 && s[0] == paths && s[3] == responses +} + +// IsSharedResponse is true when the split key is in the #/responses section of a spec +func (s SplitKey) IsSharedResponse() bool { + return len(s) > 1 && s[0] == responses +} + +// IsDefaultResponse is true when the split key is the default response for an operation +func (s SplitKey) IsDefaultResponse() bool { + return len(s) > 4 && s[0] == paths && s[3] == responses && s[4] == "default" +} + +// IsStatusCodeResponse is true when the split key is an operation response with a status code +func (s SplitKey) IsStatusCodeResponse() bool { + isInt := func() bool { + _, err := strconv.Atoi(s[4]) + + return err == nil + } + + return len(s) > 4 && s[0] == paths && s[3] == responses && isInt() +} + +// ResponseName yields either the status code or "Default" for a response +func (s SplitKey) ResponseName() string { + if s.IsStatusCodeResponse() { + code, _ := strconv.Atoi(s[4]) + + return http.StatusText(code) + } + + if s.IsDefaultResponse() { + return "Default" + } + + return "" +} + +// PathItemRef constructs a $ref object from a split key of the form /{path}/{method} +func (s SplitKey) PathItemRef() spec.Ref { + const minValidPathItems = 3 + if len(s) < minValidPathItems { + return spec.Ref{} + } + + pth, method := s[1], s[2] + if _, isValidMethod := validMethods[strings.ToUpper(method)]; !isValidMethod && !strings.HasPrefix(method, "x-") { + return spec.Ref{} + } + + return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(pth), strings.ToUpper(method))) +} + +// PathRef constructs a $ref object from a split key of the form /paths/{reference} +func (s SplitKey) PathRef() spec.Ref { + if !s.IsOperation() { + return spec.Ref{} + } + + return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(s[1]))) +} + +func (s SplitKey) isKeyName(i int) bool { + if i <= 0 { + return false + } + + count := 0 + for idx := i - 1; idx > 0; idx-- { + if s[idx] != "properties" { + break + } + count++ + } + + return count%2 != 0 +} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go new file mode 100644 index 00000000000..ceac7137728 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go @@ -0,0 +1,144 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package sortref + +import ( + "reflect" + "sort" + "strings" + + "github.com/go-openapi/analysis/internal/flatten/normalize" + 
"github.com/go-openapi/spec" +) + +var depthGroupOrder = []string{ + "sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition", +} + +type mapIterator struct { + len int + mapIter *reflect.MapIter +} + +func (i *mapIterator) Next() bool { + return i.mapIter.Next() +} + +func (i *mapIterator) Len() int { + return i.len +} + +func (i *mapIterator) Key() string { + return i.mapIter.Key().String() +} + +func mustMapIterator(anyMap any) *mapIterator { + val := reflect.ValueOf(anyMap) + + return &mapIterator{mapIter: val.MapRange(), len: val.Len()} +} + +// DepthFirst sorts a map of anything. It groups keys by category +// (shared params, op param, statuscode response, default response, definitions) +// sort groups internally by number of parts in the key and lexical names +// flatten groups into a single list of keys +func DepthFirst(in any) []string { + iterator := mustMapIterator(in) + sorted := make([]string, 0, iterator.Len()) + grouped := make(map[string]Keys, iterator.Len()) + + for iterator.Next() { + k := iterator.Key() + split := KeyParts(k) + var pk string + + if split.IsSharedOperationParam() { + pk = "sharedOpParam" + } + if split.IsOperationParam() { + pk = "opParam" + } + if split.IsStatusCodeResponse() { + pk = "codeResponse" + } + if split.IsDefaultResponse() { + pk = "defaultResponse" + } + if split.IsDefinition() { + pk = "definition" + } + if split.IsSharedParam() { + pk = "sharedParam" + } + if split.IsSharedResponse() { + pk = "sharedResponse" + } + grouped[pk] = append(grouped[pk], Key{Segments: len(split), Key: k}) + } + + for _, pk := range depthGroupOrder { + res := grouped[pk] + sort.Sort(res) + + for _, v := range res { + sorted = append(sorted, v.Key) + } + } + + return sorted +} + +// topMostRefs is able to sort refs by hierarchical then lexicographic order, +// yielding refs ordered breadth-first. 
+type topmostRefs []string + +func (k topmostRefs) Len() int { return len(k) } +func (k topmostRefs) Swap(i, j int) { k[i], k[j] = k[j], k[i] } +func (k topmostRefs) Less(i, j int) bool { + li, lj := len(strings.Split(k[i], "/")), len(strings.Split(k[j], "/")) + if li == lj { + return k[i] < k[j] + } + + return li < lj +} + +// TopmostFirst sorts references by depth +func TopmostFirst(refs []string) []string { + res := topmostRefs(refs) + sort.Sort(res) + + return res +} + +// RefRevIdx is a reverse index for references +type RefRevIdx struct { + Ref spec.Ref + Keys []string +} + +// ReverseIndex builds a reverse index for references in schemas +func ReverseIndex(schemas map[string]spec.Ref, basePath string) map[string]RefRevIdx { + collected := make(map[string]RefRevIdx) + for key, schRef := range schemas { + // normalize paths before sorting, + // so we get together keys that are from the same external file + normalizedPath := normalize.Path(schRef, basePath) + + entry, ok := collected[normalizedPath] + if ok { + entry.Keys = append(entry.Keys, key) + collected[normalizedPath] = entry + + continue + } + + collected[normalizedPath] = RefRevIdx{ + Ref: schRef, + Keys: []string{key}, + } + } + + return collected +} diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go new file mode 100644 index 00000000000..cc5c392334b --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/mixin.go @@ -0,0 +1,484 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package analysis + +import ( + "fmt" + "reflect" + "slices" + + "github.com/go-openapi/spec" +) + +// Mixin modifies the primary swagger spec by adding the paths and +// definitions from the mixin specs. Top level parameters and +// responses from the mixins are also carried over. Operation id +// collisions are avoided by appending "Mixin<N>" but only if +// needed. +// +// The following parts of primary are subject to merge, filling empty details +// - Info +// - BasePath +// - Host +// - ExternalDocs +// +// Consider calling FixEmptyResponseDescriptions() on the modified primary +// if you read them from storage and they are valid to start with. +// +// Entries in "paths", "definitions", "parameters" and "responses" are +// added to the primary in the order of the given mixins. If the entry +// already exists in primary it is skipped with a warning message. +// +// The count of skipped entries (from collisions) is returned so any +// deviation from the number expected can flag a warning in your build +// scripts. Carefully review the collisions before accepting them; +// consider renaming things if possible. +// +// No key normalization takes place (paths, type defs, +// etc). Ensure they are canonical if your downstream tools do +// key normalization of any form. +// +// Merging schemes (http, https) and consumers/producers does not account for +// collisions. +func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { + skipped := make([]string, 0, len(mixins)) + opIDs := getOpIDs(primary) + initPrimary(primary) + + for i, m := range mixins { + skipped = append(skipped, mergeSwaggerProps(primary, m)...) + + skipped = append(skipped, mergeConsumes(primary, m)...) + + skipped = append(skipped, mergeProduces(primary, m)...) + + skipped = append(skipped, mergeTags(primary, m)...) + + skipped = append(skipped, mergeSchemes(primary, m)...) + + skipped = append(skipped, mergeSecurityDefinitions(primary, m)...)
+ + skipped = append(skipped, mergeSecurityRequirements(primary, m)...) + + skipped = append(skipped, mergeDefinitions(primary, m)...) + + // merging paths requires a map of operationIDs to work with + skipped = append(skipped, mergePaths(primary, m, opIDs, i)...) + + skipped = append(skipped, mergeParameters(primary, m)...) + + skipped = append(skipped, mergeResponses(primary, m)...) + } + + return skipped +} + +// getOpIDs extracts all the paths..operationIds from the given +// spec and returns them as the keys in a map with 'true' values. +func getOpIDs(s *spec.Swagger) map[string]bool { + rv := make(map[string]bool) + if s.Paths == nil { + return rv + } + + for _, v := range s.Paths.Paths { + piops := pathItemOps(v) + + for _, op := range piops { + rv[op.ID] = true + } + } + + return rv +} + +func pathItemOps(p spec.PathItem) []*spec.Operation { + var rv []*spec.Operation + rv = appendOp(rv, p.Get) + rv = appendOp(rv, p.Put) + rv = appendOp(rv, p.Post) + rv = appendOp(rv, p.Delete) + rv = appendOp(rv, p.Head) + rv = appendOp(rv, p.Patch) + + return rv +} + +func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation { + if op == nil { + return ops + } + + return append(ops, op) +} + +func mergeSecurityDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for k, v := range m.SecurityDefinitions { + if _, exists := primary.SecurityDefinitions[k]; exists { + warn := fmt.Sprintf( + "SecurityDefinitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + + primary.SecurityDefinitions[k] = v + } + + return +} + +func mergeSecurityRequirements(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for _, v := range m.Security { + found := false + for _, vv := range primary.Security { + if reflect.DeepEqual(v, vv) { + found = true + + break + } + } + + if found { + warn := fmt.Sprintf( + "Security requirement: '%v' already exists in primary or higher priority mixin, skipping\n", v) + skipped = append(skipped, warn) + + continue + } + primary.Security = append(primary.Security, v) + } + + return +} + +func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for k, v := range m.Definitions { + // assume name collisions represent IDENTICAL type. careful. + if _, exists := primary.Definitions[k]; exists { + warn := fmt.Sprintf( + "definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + primary.Definitions[k] = v + } + + return +} + +func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIDs map[string]bool, mixIndex int) (skipped []string) { + if m.Paths != nil { + for k, v := range m.Paths.Paths { + if _, exists := primary.Paths.Paths[k]; exists { + warn := fmt.Sprintf( + "paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + + // Swagger requires that operationIds be + unique within a spec. If we find a + collision we append "Mixin0" to the + operationId we are adding, where 0 is mixin + index. We assume that operationIds within + all the provided specs are already unique.
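+ // For example (a sketch): a mixin at index 1 that re-declares operationId + // "getItems" has its copy renamed to "getItemsMixin1" by the loop below.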
+ piops := pathItemOps(v) + for _, piop := range piops { + if opIDs[piop.ID] { + piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex) + } + opIDs[piop.ID] = true + } + primary.Paths.Paths[k] = v + } + } + + return +} + +func mergeParameters(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for k, v := range m.Parameters { + // could try to rename on conflict but would + // have to fix $refs in the mixin. Complain + // for now + if _, exists := primary.Parameters[k]; exists { + warn := fmt.Sprintf( + "top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + primary.Parameters[k] = v + } + + return +} + +func mergeResponses(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for k, v := range m.Responses { + // could try to rename on conflict but would + // have to fix $refs in the mixin. Complain + // for now + if _, exists := primary.Responses[k]; exists { + warn := fmt.Sprintf( + "top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + + continue + } + primary.Responses[k] = v + } + + return skipped +} + +func mergeConsumes(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Consumes { + found := slices.Contains(primary.Consumes, v) + + if found { + // no warning here: we just skip it + continue + } + primary.Consumes = append(primary.Consumes, v) + } + + return []string{} +} + +func mergeProduces(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Produces { + found := slices.Contains(primary.Produces, v) + + if found { + // no warning here: we just skip it + continue + } + primary.Produces = append(primary.Produces, v) + } + + return []string{} +} + +func mergeTags(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for _, v := range m.Tags { + found := false + for _, vv := range primary.Tags { + if v.Name == vv.Name { + found = true + + break + } + } + + if found { + warn := fmt.Sprintf( + "top level tags entry with name '%v' already exists in primary or higher priority mixin, skipping\n", + v.Name, + ) + skipped = append(skipped, warn) + + continue + } + + primary.Tags = append(primary.Tags, v) + } + + return +} + +func mergeSchemes(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Schemes { + found := slices.Contains(primary.Schemes, v) + + if found { + // no warning here: we just skip it + continue + } + primary.Schemes = append(primary.Schemes, v) + } + + return []string{} +} + +func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string { + var skipped, skippedInfo, skippedDocs []string + + primary.Extensions, skipped = mergeExtensions(primary.Extensions, m.Extensions) + + // merging details in swagger top properties + if primary.Host == "" { + primary.Host = m.Host + } + + if primary.BasePath == "" { + primary.BasePath = m.BasePath + } + + if primary.Info == nil { + primary.Info = m.Info + } else if m.Info != nil { + skippedInfo = mergeInfo(primary.Info, m.Info) + skipped = append(skipped, skippedInfo...) + } + + if primary.ExternalDocs == nil { + primary.ExternalDocs = m.ExternalDocs + } else if m != nil { + skippedDocs = mergeExternalDocs(primary.ExternalDocs, m.ExternalDocs) + skipped = append(skipped, skippedDocs...) 
+ } + + return skipped +} + +//nolint:unparam +func mergeExternalDocs(primary *spec.ExternalDocumentation, m *spec.ExternalDocumentation) []string { + if primary.Description == "" { + primary.Description = m.Description + } + + if primary.URL == "" { + primary.URL = m.URL + } + + return nil +} + +func mergeInfo(primary *spec.Info, m *spec.Info) []string { + var sk, skipped []string + + primary.Extensions, sk = mergeExtensions(primary.Extensions, m.Extensions) + skipped = append(skipped, sk...) + + if primary.Description == "" { + primary.Description = m.Description + } + + if primary.Title == "" { + primary.Title = m.Title + } + + if primary.TermsOfService == "" { + primary.TermsOfService = m.TermsOfService + } + + if primary.Version == "" { + primary.Version = m.Version + } + + if primary.Contact == nil { + primary.Contact = m.Contact + } else if m.Contact != nil { + var csk []string + primary.Contact.Extensions, csk = mergeExtensions(primary.Contact.Extensions, m.Contact.Extensions) + skipped = append(skipped, csk...) + + if primary.Contact.Name == "" { + primary.Contact.Name = m.Contact.Name + } + + if primary.Contact.URL == "" { + primary.Contact.URL = m.Contact.URL + } + + if primary.Contact.Email == "" { + primary.Contact.Email = m.Contact.Email + } + } + + if primary.License == nil { + primary.License = m.License + } else if m.License != nil { + var lsk []string + primary.License.Extensions, lsk = mergeExtensions(primary.License.Extensions, m.License.Extensions) + skipped = append(skipped, lsk...) + + if primary.License.Name == "" { + primary.License.Name = m.License.Name + } + + if primary.License.URL == "" { + primary.License.URL = m.License.URL + } + } + + return skipped +} + +func mergeExtensions(primary spec.Extensions, m spec.Extensions) (result spec.Extensions, skipped []string) { + if primary == nil { + result = m + + return + } + + if m == nil { + result = primary + + return + } + + result = primary + for k, v := range m { + if _, found := primary[k]; found { + skipped = append(skipped, k) + + continue + } + + primary[k] = v + } + + return +} + +func initPrimary(primary *spec.Swagger) { + if primary.SecurityDefinitions == nil { + primary.SecurityDefinitions = make(map[string]*spec.SecurityScheme) + } + + if primary.Security == nil { + primary.Security = make([]map[string][]string, 0, allocSmallMap) + } + + if primary.Produces == nil { + primary.Produces = make([]string, 0, allocSmallMap) + } + + if primary.Consumes == nil { + primary.Consumes = make([]string, 0, allocSmallMap) + } + + if primary.Tags == nil { + primary.Tags = make([]spec.Tag, 0, allocSmallMap) + } + + if primary.Schemes == nil { + primary.Schemes = make([]string, 0, allocSmallMap) + } + + if primary.Paths == nil { + primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)} + } + + if primary.Paths.Paths == nil { + primary.Paths.Paths = make(map[string]spec.PathItem) + } + + if primary.Definitions == nil { + primary.Definitions = make(spec.Definitions) + } + + if primary.Parameters == nil { + primary.Parameters = make(map[string]spec.Parameter) + } + + if primary.Responses == nil { + primary.Responses = make(map[string]spec.Response) + } +} diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go new file mode 100644 index 00000000000..039dac15661 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/schema.go @@ -0,0 +1,257 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + 
+package analysis + +import ( + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +// SchemaOpts configures the schema analyzer +type SchemaOpts struct { + Schema *spec.Schema + Root any + BasePath string + _ struct{} +} + +// Schema analyzes the schema and classifies it according to known +// patterns. +func Schema(opts SchemaOpts) (*AnalyzedSchema, error) { + if opts.Schema == nil { + return nil, ErrNoSchema + } + + a := &AnalyzedSchema{ + schema: opts.Schema, + root: opts.Root, + basePath: opts.BasePath, + } + + a.initializeFlags() + a.inferKnownType() + a.inferEnum() + a.inferBaseType() + + if err := a.inferMap(); err != nil { + return nil, err + } + if err := a.inferArray(); err != nil { + return nil, err + } + + a.inferTuple() + + if err := a.inferFromRef(); err != nil { + return nil, err + } + + a.inferSimpleSchema() + + return a, nil +} + +// AnalyzedSchema indicates what the schema represents +type AnalyzedSchema struct { + schema *spec.Schema + root any + basePath string + + hasProps bool + hasAllOf bool + hasItems bool + hasAdditionalProps bool + hasAdditionalItems bool + hasRef bool + + IsKnownType bool + IsSimpleSchema bool + IsArray bool + IsSimpleArray bool + IsMap bool + IsSimpleMap bool + IsExtendedObject bool + IsTuple bool + IsTupleWithExtra bool + IsBaseType bool + IsEnum bool +} + +// inherits copies value fields from other onto this schema +func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) { + if other == nil { + return + } + a.hasProps = other.hasProps + a.hasAllOf = other.hasAllOf + a.hasItems = other.hasItems + a.hasAdditionalItems = other.hasAdditionalItems + a.hasAdditionalProps = other.hasAdditionalProps + a.hasRef = other.hasRef + + a.IsKnownType = other.IsKnownType + a.IsSimpleSchema = other.IsSimpleSchema + a.IsArray = other.IsArray + a.IsSimpleArray = other.IsSimpleArray + a.IsMap = other.IsMap + a.IsSimpleMap = other.IsSimpleMap + a.IsExtendedObject = other.IsExtendedObject + a.IsTuple = other.IsTuple + a.IsTupleWithExtra = other.IsTupleWithExtra + a.IsBaseType = other.IsBaseType + a.IsEnum = other.IsEnum +} + +func (a *AnalyzedSchema) inferFromRef() error { + if a.hasRef { + sch := new(spec.Schema) + sch.Ref = a.schema.Ref + err := spec.ExpandSchema(sch, a.root, nil) + if err != nil { + return err + } + rsch, err := Schema(SchemaOpts{ + Schema: sch, + Root: a.root, + BasePath: a.basePath, + }) + if err != nil { + // NOTE(fredbi): currently the only cause for errors is + // unresolved ref. Since spec.ExpandSchema() expands the + // schema recursively, there is no chance to get there, + // until we add more causes for error in this schema analysis.
+ return err + } + a.inherits(rsch) + } + + return nil +} + +func (a *AnalyzedSchema) inferSimpleSchema() { + a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap +} + +func (a *AnalyzedSchema) inferKnownType() { + tpe := a.schema.Type + format := a.schema.Format + a.IsKnownType = tpe.Contains("boolean") || + tpe.Contains("integer") || + tpe.Contains("number") || + tpe.Contains("string") || + (format != "" && strfmt.Default.ContainsName(format)) || + (a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems) +} + +func (a *AnalyzedSchema) inferMap() error { + if !a.isObjectType() { + return nil + } + + hasExtra := a.hasProps || a.hasAllOf + a.IsMap = a.hasAdditionalProps && !hasExtra + a.IsExtendedObject = a.hasAdditionalProps && hasExtra + + if !a.IsMap { + return nil + } + + // maps + if a.schema.AdditionalProperties.Schema != nil { + msch, err := Schema(SchemaOpts{ + Schema: a.schema.AdditionalProperties.Schema, + Root: a.root, + BasePath: a.basePath, + }) + if err != nil { + return err + } + a.IsSimpleMap = msch.IsSimpleSchema + } else if a.schema.AdditionalProperties.Allows { + a.IsSimpleMap = true + } + + return nil +} + +func (a *AnalyzedSchema) inferArray() error { + // an array has Items defined as an object schema, otherwise we qualify this JSON array as a tuple + // (yes, even if the Items array contains only one element). + // arrays in JSON schema may be unrestricted (i.e. no Items specified). + // Note that arrays in Swagger MUST have Items. Nonetheless, we analyze unrestricted arrays. + // + // NOTE: the spec package misses the distinction between: + // items: [] and items: {}, so we consider both arrays here. + a.IsArray = a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Schemas == nil) + if a.IsArray && a.hasItems { + if a.schema.Items.Schema != nil { + itsch, err := Schema(SchemaOpts{ + Schema: a.schema.Items.Schema, + Root: a.root, + BasePath: a.basePath, + }) + if err != nil { + return err + } + + a.IsSimpleArray = itsch.IsSimpleSchema + } + } + + if a.IsArray && !a.hasItems { + a.IsSimpleArray = true + } + + return nil +} + +func (a *AnalyzedSchema) inferTuple() { + tuple := a.hasItems && a.schema.Items.Schemas != nil + a.IsTuple = tuple && !a.hasAdditionalItems + a.IsTupleWithExtra = tuple && a.hasAdditionalItems +} + +func (a *AnalyzedSchema) inferBaseType() { + if a.isObjectType() { + a.IsBaseType = a.schema.Discriminator != "" + } +} + +func (a *AnalyzedSchema) inferEnum() { + a.IsEnum = len(a.schema.Enum) > 0 +} + +func (a *AnalyzedSchema) initializeFlags() { + a.hasProps = len(a.schema.Properties) > 0 + a.hasAllOf = len(a.schema.AllOf) > 0 + a.hasRef = a.schema.Ref.String() != "" + + a.hasItems = a.schema.Items != nil && + (a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0) + + a.hasAdditionalProps = a.schema.AdditionalProperties != nil && + (a.schema.AdditionalProperties.Schema != nil || a.schema.AdditionalProperties.Allows) + + a.hasAdditionalItems = a.schema.AdditionalItems != nil && + (a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows) +} + +func (a *AnalyzedSchema) isObjectType() bool { + return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object")) +} + +func (a *AnalyzedSchema) isArrayType() bool { + return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array")) +} + +// isAnalyzedAsComplex determines if an analyzed schema is eligible for flattening (i.e. it is "complex").
+// +// Complex means the schema is not any of: +// - a simple type (primitive) +// - an array of something (items are possibly complex; if this is the case, items will generate a definition) +// - a map of something (additionalProperties are possibly complex; if this is the case, additionalProperties will +// generate a definition) +func (a *AnalyzedSchema) isAnalyzedAsComplex() bool { + return !a.IsSimpleSchema && !a.IsArray && !a.IsMap +} diff --git a/vendor/github.com/go-openapi/errors/.gitattributes b/vendor/github.com/go-openapi/errors/.gitattributes new file mode 100644 index 00000000000..a0717e4b3b9 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf \ No newline at end of file diff --git a/vendor/github.com/go-openapi/errors/.gitignore b/vendor/github.com/go-openapi/errors/.gitignore new file mode 100644 index 00000000000..dd91ed6a04e --- /dev/null +++ b/vendor/github.com/go-openapi/errors/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml new file mode 100644 index 00000000000..5609b4fea9c --- /dev/null +++ b/vendor/github.com/go-openapi/errors/.golangci.yml @@ -0,0 +1,75 @@ +version: "2" +linters: + default: all + disable: + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - godot + - godox + - gosmopolitan + - inamedparam + #- intrange # disabled while < go1.22 + - ireturn + - lll + - musttag + - nestif + - nlreturn + - noinlineerr + - nonamedreturns + - paralleltest + - recvcheck + - testpackage + - thelper + - tparallel + - unparam + - varnamelen + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation.
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/errors/LICENSE b/vendor/github.com/go-openapi/errors/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md new file mode 100644 index 00000000000..d7e3a18bcf5 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/README.md @@ -0,0 +1,12 @@ +# OpenAPI errors [![Build Status](https://github.com/go-openapi/errors/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/errors/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/errors.svg)](https://pkg.go.dev/github.com/go-openapi/errors) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/errors)](https://goreportcard.com/report/github.com/go-openapi/errors) + +Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit. + +## Licensing + +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go new file mode 100644 index 00000000000..d39233bafe4 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/api.go @@ -0,0 +1,181 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "strings" +) + +// DefaultHTTPCode is used when the error Code cannot be used as an HTTP code. +var DefaultHTTPCode = http.StatusUnprocessableEntity + +// Error represents an error interface that all swagger framework errors implement +type Error interface { + error + Code() int32 +} + +type apiError struct { + code int32 + message string +} + +func (a *apiError) Error() string { + return a.message +} + +func (a *apiError) Code() int32 { + return a.code +} + +// MarshalJSON implements the JSON encoding interface +func (a apiError) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]any{ + "code": a.code, + "message": a.message, + }) +} + +// New creates a new API error with a code and a message +func New(code int32, message string, args ...any) Error { + if len(args) > 0 { + return &apiError{ + code: code, + message: fmt.Sprintf(message, args...), + } + } + return &apiError{ + code: code, + message: message, + } +} + +// NotFound creates a new not found error +func NotFound(message string, args ...any) Error { + if message == "" { + message = "Not found" + } + return New(http.StatusNotFound, message, args...)
+} + +// NotImplemented creates a new not implemented error +func NotImplemented(message string) Error { + return New(http.StatusNotImplemented, "%s", message) +} + +// MethodNotAllowedError represents an error for when the path matches but the method doesn't +type MethodNotAllowedError struct { + code int32 + Allowed []string + message string +} + +func (m *MethodNotAllowedError) Error() string { + return m.message +} + +// Code the error code +func (m *MethodNotAllowedError) Code() int32 { + return m.code +} + +// MarshalJSON implements the JSON encoding interface +func (m MethodNotAllowedError) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]any{ + "code": m.code, + "message": m.message, + "allowed": m.Allowed, + }) +} + +func errorAsJSON(err Error) []byte { + //nolint:errchkjson + b, _ := json.Marshal(struct { + Code int32 `json:"code"` + Message string `json:"message"` + }{err.Code(), err.Error()}) + return b +} + +func flattenComposite(errs *CompositeError) *CompositeError { + var res []error + for _, er := range errs.Errors { + switch e := er.(type) { + case *CompositeError: + if e != nil && len(e.Errors) > 0 { + flat := flattenComposite(e) + if len(flat.Errors) > 0 { + res = append(res, flat.Errors...) + } + } + default: + if e != nil { + res = append(res, e) + } + } + } + return CompositeValidationError(res...) +} + +// MethodNotAllowed creates a new method not allowed error +func MethodNotAllowed(requested string, allow []string) Error { + msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ",")) + return &MethodNotAllowedError{ + code: http.StatusMethodNotAllowed, + Allowed: allow, + message: msg, + } +} + +// ServeError implements the http error handler interface +func ServeError(rw http.ResponseWriter, r *http.Request, err error) { + rw.Header().Set("Content-Type", "application/json") + switch e := err.(type) { + case *CompositeError: + er := flattenComposite(e) + // strips composite errors to first element only + if len(er.Errors) > 0 { + ServeError(rw, r, er.Errors[0]) + } else { + // guard against empty CompositeError (invalid construct) + ServeError(rw, r, nil) + } + case *MethodNotAllowedError: + rw.Header().Add("Allow", strings.Join(e.Allowed, ",")) + rw.WriteHeader(asHTTPCode(int(e.Code()))) + if r == nil || r.Method != http.MethodHead { + _, _ = rw.Write(errorAsJSON(e)) + } + case Error: + value := reflect.ValueOf(e) + if value.Kind() == reflect.Ptr && value.IsNil() { + rw.WriteHeader(http.StatusInternalServerError) + _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) + return + } + rw.WriteHeader(asHTTPCode(int(e.Code()))) + if r == nil || r.Method != http.MethodHead { + _, _ = rw.Write(errorAsJSON(e)) + } + case nil: + rw.WriteHeader(http.StatusInternalServerError) + _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) + default: + rw.WriteHeader(http.StatusInternalServerError) + if r == nil || r.Method != http.MethodHead { + _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "%v", err))) + } + } +} + +func asHTTPCode(input int) int { + if input >= maximumValidHTTPCode { + return DefaultHTTPCode + } + return input +} diff --git a/vendor/github.com/go-openapi/errors/auth.go b/vendor/github.com/go-openapi/errors/auth.go new file mode 100644 index 00000000000..08de582e5db --- /dev/null +++ b/vendor/github.com/go-openapi/errors/auth.go @@ -0,0 +1,11 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// 
SPDX-License-Identifier: Apache-2.0 + +package errors + +import "net/http" + +// Unauthenticated returns an unauthenticated error +func Unauthenticated(scheme string) Error { + return New(http.StatusUnauthorized, "unauthenticated for %s", scheme) +} diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go new file mode 100644 index 00000000000..b4627f30f4c --- /dev/null +++ b/vendor/github.com/go-openapi/errors/doc.go @@ -0,0 +1,15 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +/* +Package errors provides an Error interface and several concrete types +implementing this interface to manage API errors and JSON-schema validation +errors. + +A middleware handler ServeError() is provided to serve the error types +it defines. + +It is used throughout the various go-openapi toolkit libraries +(https://github.com/go-openapi). +*/ +package errors diff --git a/vendor/github.com/go-openapi/errors/headers.go b/vendor/github.com/go-openapi/errors/headers.go new file mode 100644 index 00000000000..2d837c34ac4 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/headers.go @@ -0,0 +1,92 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" +) + +// Validation represents a failure of a precondition +type Validation struct { //nolint: errname + code int32 + Name string + In string + Value any + message string + Values []any +} + +func (e *Validation) Error() string { + return e.message +} + +// Code the error code +func (e *Validation) Code() int32 { + return e.code +} + +// MarshalJSON implements the JSON encoding interface +func (e Validation) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]any{ + "code": e.code, + "message": e.message, + "in": e.In, + "name": e.Name, + "value": e.Value, + "values": e.Values, + }) +} + +// ValidateName sets the name for a validation or updates it for a nested property +func (e *Validation) ValidateName(name string) *Validation { + if name != "" { + if e.Name == "" { + e.Name = name + e.message = name + e.message + } else { + e.Name = name + "." + e.Name + e.message = name + "."
+ e.message + } + } + return e +} + +const ( + contentTypeFail = `unsupported media type %q, only %v are allowed` + responseFormatFail = `unsupported media type requested, only %v are available` +) + +// InvalidContentType error for an invalid content type +func InvalidContentType(value string, allowed []string) *Validation { + values := make([]any, 0, len(allowed)) + for _, v := range allowed { + values = append(values, v) + } + return &Validation{ + code: http.StatusUnsupportedMediaType, + Name: "Content-Type", + In: "header", + Value: value, + Values: values, + message: fmt.Sprintf(contentTypeFail, value, allowed), + } +} + +// InvalidResponseFormat error for an unacceptable response format request +func InvalidResponseFormat(value string, allowed []string) *Validation { + values := make([]any, 0, len(allowed)) + for _, v := range allowed { + values = append(values, v) + } + return &Validation{ + code: http.StatusNotAcceptable, + Name: "Accept", + In: "header", + Value: value, + Values: values, + message: fmt.Sprintf(responseFormatFail, allowed), + } +} diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go new file mode 100644 index 00000000000..c434e59a6fa --- /dev/null +++ b/vendor/github.com/go-openapi/errors/middleware.go @@ -0,0 +1,39 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package errors + +import ( + "bytes" + "fmt" + "strings" +) + +// APIVerificationFailed is an error that contains all the missing info for a mismatched section +// between the api registrations and the api spec +type APIVerificationFailed struct { //nolint: errname + Section string `json:"section,omitempty"` + MissingSpecification []string `json:"missingSpecification,omitempty"` + MissingRegistration []string `json:"missingRegistration,omitempty"` +} + +func (v *APIVerificationFailed) Error() string { + buf := bytes.NewBuffer(nil) + + hasRegMissing := len(v.MissingRegistration) > 0 + hasSpecMissing := len(v.MissingSpecification) > 0 + + if hasRegMissing { + fmt.Fprintf(buf, "missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section) + } + + if hasRegMissing && hasSpecMissing { + buf.WriteString("\n") + } + + if hasSpecMissing { + fmt.Fprintf(buf, "missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section) + } + + return buf.String() +} diff --git a/vendor/github.com/go-openapi/errors/parsing.go b/vendor/github.com/go-openapi/errors/parsing.go new file mode 100644 index 00000000000..ea2a7c60377 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/parsing.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" +) + +// ParseError represents a parsing error +type ParseError struct { + code int32 + Name string + In string + Value string + Reason error + message string +} + +// NewParseError creates a new parse error +func NewParseError(name, in, value string, reason error) *ParseError { + var msg string + if in == "" { + msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason) + } else { + msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason) + } + return &ParseError{ + code: http.StatusBadRequest, + Name: name, + In: in, + Value: value, + Reason: reason, + message: msg, + } +} + +func (e *ParseError) Error() string { + return e.message +} + +// Code returns 
the http status code for this error +func (e *ParseError) Code() int32 { + return e.code +} + +// MarshalJSON implements the JSON encoding interface +func (e ParseError) MarshalJSON() ([]byte, error) { + var reason string + if e.Reason != nil { + reason = e.Reason.Error() + } + return json.Marshal(map[string]any{ + "code": e.code, + "message": e.message, + "in": e.In, + "name": e.Name, + "value": e.Value, + "reason": reason, + }) +} + +const ( + parseErrorTemplContent = `parsing %s %s from %q failed, because %s` + parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s` +) diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go new file mode 100644 index 00000000000..e59ca4f863f --- /dev/null +++ b/vendor/github.com/go-openapi/errors/schema.go @@ -0,0 +1,608 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" +) + +const ( + invalidType = "%s is an invalid type name" + typeFail = "%s in %s must be of type %s" + typeFailWithData = "%s in %s must be of type %s: %q" + typeFailWithError = "%s in %s must be of type %s, because: %s" + requiredFail = "%s in %s is required" + readOnlyFail = "%s in %s is readOnly" + tooLongMessage = "%s in %s should be at most %d chars long" + tooShortMessage = "%s in %s should be at least %d chars long" + patternFail = "%s in %s should match '%s'" + enumFail = "%s in %s should be one of %v" + multipleOfFail = "%s in %s should be a multiple of %v" + maximumIncFail = "%s in %s should be less than or equal to %v" + maximumExcFail = "%s in %s should be less than %v" + minIncFail = "%s in %s should be greater than or equal to %v" + minExcFail = "%s in %s should be greater than %v" + uniqueFail = "%s in %s shouldn't contain duplicates" + maximumItemsFail = "%s in %s should have at most %d items" + minItemsFail = "%s in %s should have at least %d items" + typeFailNoIn = "%s must be of type %s" + typeFailWithDataNoIn = "%s must be of type %s: %q" + typeFailWithErrorNoIn = "%s must be of type %s, because: %s" + requiredFailNoIn = "%s is required" + readOnlyFailNoIn = "%s is readOnly" + tooLongMessageNoIn = "%s should be at most %d chars long" + tooShortMessageNoIn = "%s should be at least %d chars long" + patternFailNoIn = "%s should match '%s'" + enumFailNoIn = "%s should be one of %v" + multipleOfFailNoIn = "%s should be a multiple of %v" + maximumIncFailNoIn = "%s should be less than or equal to %v" + maximumExcFailNoIn = "%s should be less than %v" + minIncFailNoIn = "%s should be greater than or equal to %v" + minExcFailNoIn = "%s should be greater than %v" + uniqueFailNoIn = "%s shouldn't contain duplicates" + maximumItemsFailNoIn = "%s should have at most %d items" + minItemsFailNoIn = "%s should have at least %d items" + noAdditionalItems = "%s in %s can't have additional items" + noAdditionalItemsNoIn = "%s can't have additional items" + tooFewProperties = "%s in %s should have at least %d properties" + tooFewPropertiesNoIn = "%s should have at least %d properties" + tooManyProperties = "%s in %s should have at most %d properties" + tooManyPropertiesNoIn = "%s should have at most %d properties" + unallowedProperty = "%s.%s in %s is a forbidden property" + unallowedPropertyNoIn = "%s.%s is a forbidden property" + failedAllPatternProps = "%s.%s in %s failed all pattern properties" + failedAllPatternPropsNoIn = "%s.%s failed all pattern properties" + 
multipleOfMustBePositive = "factor MultipleOf declared for %s must be positive: %v" +) + +const maximumValidHTTPCode = 600 + +// All code responses can be used to differentiate errors for different handling +// by the consuming program +const ( + // CompositeErrorCode remains 422 for backwards-compatibility + // and to separate it from validation errors with cause + CompositeErrorCode = http.StatusUnprocessableEntity + + // InvalidTypeCode is used for any subclass of invalid types + InvalidTypeCode = maximumValidHTTPCode + iota + RequiredFailCode + TooLongFailCode + TooShortFailCode + PatternFailCode + EnumFailCode + MultipleOfFailCode + MaxFailCode + MinFailCode + UniqueFailCode + MaxItemsFailCode + MinItemsFailCode + NoAdditionalItemsCode + TooFewPropertiesCode + TooManyPropertiesCode + UnallowedPropertyCode + FailedAllPatternPropsCode + MultipleOfMustBePositiveCode + ReadOnlyFailCode +) + +// CompositeError is an error that groups several errors together +type CompositeError struct { + Errors []error + code int32 + message string +} + +// Code for this error +func (c *CompositeError) Code() int32 { + return c.code +} + +func (c *CompositeError) Error() string { + if len(c.Errors) > 0 { + msgs := []string{c.message + ":"} + for _, e := range c.Errors { + msgs = append(msgs, e.Error()) + } + return strings.Join(msgs, "\n") + } + return c.message +} + +func (c *CompositeError) Unwrap() []error { + return c.Errors +} + +// MarshalJSON implements the JSON encoding interface +func (c CompositeError) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]any{ + "code": c.code, + "message": c.message, + "errors": c.Errors, + }) +} + +// CompositeValidationError an error to wrap a bunch of other errors +func CompositeValidationError(errors ...error) *CompositeError { + return &CompositeError{ + code: CompositeErrorCode, + Errors: append(make([]error, 0, len(errors)), errors...), + message: "validation failure list", + } +} + +// ValidateName recursively sets the name for all validations or updates them for nested properties +func (c *CompositeError) ValidateName(name string) *CompositeError { + for i, e := range c.Errors { + if ve, ok := e.(*Validation); ok { + c.Errors[i] = ve.ValidateName(name) + } else if ce, ok := e.(*CompositeError); ok { + c.Errors[i] = ce.ValidateName(name) + } + } + + return c +} + +// FailedAllPatternProperties an error for when a property matches none of the pattern properties +func FailedAllPatternProperties(name, in, key string) *Validation { + msg := fmt.Sprintf(failedAllPatternProps, name, key, in) + if in == "" { + msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key) + } + return &Validation{ + code: FailedAllPatternPropsCode, + Name: name, + In: in, + Value: key, + message: msg, + } +} + +// PropertyNotAllowed an error for when a property is not allowed +func PropertyNotAllowed(name, in, key string) *Validation { + msg := fmt.Sprintf(unallowedProperty, name, key, in) + if in == "" { + msg = fmt.Sprintf(unallowedPropertyNoIn, name, key) + } + return &Validation{ + code: UnallowedPropertyCode, + Name: name, + In: in, + Value: key, + message: msg, + } +} + +// TooFewProperties an error for an object with too few properties +func TooFewProperties(name, in string, n int64) *Validation { + msg := fmt.Sprintf(tooFewProperties, name, in, n) + if in == "" { + msg = fmt.Sprintf(tooFewPropertiesNoIn, name, n) + } + return &Validation{ + code: TooFewPropertiesCode, + Name: name, + In: in, + Value: n, + message: msg, + } +} + +// TooManyProperties an error for an
object with too many properties +func TooManyProperties(name, in string, n int64) *Validation { + msg := fmt.Sprintf(tooManyProperties, name, in, n) + if in == "" { + msg = fmt.Sprintf(tooManyPropertiesNoIn, name, n) + } + return &Validation{ + code: TooManyPropertiesCode, + Name: name, + In: in, + Value: n, + message: msg, + } +} + +// AdditionalItemsNotAllowed an error for invalid additional items +func AdditionalItemsNotAllowed(name, in string) *Validation { + msg := fmt.Sprintf(noAdditionalItems, name, in) + if in == "" { + msg = fmt.Sprintf(noAdditionalItemsNoIn, name) + } + return &Validation{ + code: NoAdditionalItemsCode, + Name: name, + In: in, + message: msg, + } +} + +// InvalidCollectionFormat another flavor of invalid type error +func InvalidCollectionFormat(name, in, format string) *Validation { + return &Validation{ + code: InvalidTypeCode, + Name: name, + In: in, + Value: format, + message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name), + } +} + +// InvalidTypeName an error for when the type is invalid +func InvalidTypeName(typeName string) *Validation { + return &Validation{ + code: InvalidTypeCode, + Value: typeName, + message: fmt.Sprintf(invalidType, typeName), + } +} + +// InvalidType creates an error for when the type is invalid +func InvalidType(name, in, typeName string, value any) *Validation { + var message string + + if in != "" { + switch value.(type) { + case string: + message = fmt.Sprintf(typeFailWithData, name, in, typeName, value) + case error: + message = fmt.Sprintf(typeFailWithError, name, in, typeName, value) + default: + message = fmt.Sprintf(typeFail, name, in, typeName) + } + } else { + switch value.(type) { + case string: + message = fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value) + case error: + message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value) + default: + message = fmt.Sprintf(typeFailNoIn, name, typeName) + } + } + + return &Validation{ + code: InvalidTypeCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// DuplicateItems error for when an array contains duplicates +func DuplicateItems(name, in string) *Validation { + msg := fmt.Sprintf(uniqueFail, name, in) + if in == "" { + msg = fmt.Sprintf(uniqueFailNoIn, name) + } + return &Validation{ + code: UniqueFailCode, + Name: name, + In: in, + message: msg, + } +} + +// TooManyItems error for when an array contains too many items +func TooManyItems(name, in string, maximum int64, value any) *Validation { + msg := fmt.Sprintf(maximumItemsFail, name, in, maximum) + if in == "" { + msg = fmt.Sprintf(maximumItemsFailNoIn, name, maximum) + } + + return &Validation{ + code: MaxItemsFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// TooFewItems error for when an array contains too few items +func TooFewItems(name, in string, minimum int64, value any) *Validation { + msg := fmt.Sprintf(minItemsFail, name, in, minimum) + if in == "" { + msg = fmt.Sprintf(minItemsFailNoIn, name, minimum) + } + return &Validation{ + code: MinItemsFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// ExceedsMaximumInt error for when maximum validation fails +func ExceedsMaximumInt(name, in string, maximum int64, exclusive bool, value any) *Validation { + var message string + if in == "" { + m := maximumIncFailNoIn + if exclusive { + m = maximumExcFailNoIn + } + message = fmt.Sprintf(m, name, maximum) + } else { + m := maximumIncFail + if exclusive { + m = maximumExcFail
+ } + message = fmt.Sprintf(m, name, in, maximum) + } + return &Validation{ + code: MaxFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMaximumUint error for when maximum validation fails +func ExceedsMaximumUint(name, in string, maximum uint64, exclusive bool, value any) *Validation { + var message string + if in == "" { + m := maximumIncFailNoIn + if exclusive { + m = maximumExcFailNoIn + } + message = fmt.Sprintf(m, name, maximum) + } else { + m := maximumIncFail + if exclusive { + m = maximumExcFail + } + message = fmt.Sprintf(m, name, in, maximum) + } + return &Validation{ + code: MaxFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMaximum error for when maximum validation fails +func ExceedsMaximum(name, in string, maximum float64, exclusive bool, value any) *Validation { + var message string + if in == "" { + m := maximumIncFailNoIn + if exclusive { + m = maximumExcFailNoIn + } + message = fmt.Sprintf(m, name, maximum) + } else { + m := maximumIncFail + if exclusive { + m = maximumExcFail + } + message = fmt.Sprintf(m, name, in, maximum) + } + return &Validation{ + code: MaxFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimumInt error for when minimum validation fails +func ExceedsMinimumInt(name, in string, minimum int64, exclusive bool, value any) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, minimum) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, minimum) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimumUint error for when minimum validation fails +func ExceedsMinimumUint(name, in string, minimum uint64, exclusive bool, value any) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, minimum) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, minimum) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimum error for when minimum validation fails +func ExceedsMinimum(name, in string, minimum float64, exclusive bool, value any) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, minimum) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, minimum) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// NotMultipleOf error for when multiple of validation fails +func NotMultipleOf(name, in string, multiple, value any) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple) + } else { + msg = fmt.Sprintf(multipleOfFail, name, in, multiple) + } + return &Validation{ + code: MultipleOfFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// EnumFail error for when an enum validation fails +func EnumFail(name, in string, value any, values []any) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(enumFailNoIn, name, values) + } else { + msg = fmt.Sprintf(enumFail, name, in, values) + } + + return
&Validation{ + code: EnumFailCode, + Name: name, + In: in, + Value: value, + Values: values, + message: msg, + } +} + +// Required error for when a value is missing +func Required(name, in string, value any) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(requiredFailNoIn, name) + } else { + msg = fmt.Sprintf(requiredFail, name, in) + } + return &Validation{ + code: RequiredFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// ReadOnly error for when a readOnly value is present in the request +func ReadOnly(name, in string, value any) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(readOnlyFailNoIn, name) + } else { + msg = fmt.Sprintf(readOnlyFail, name, in) + } + return &Validation{ + code: ReadOnlyFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// TooLong error for when a string is too long +func TooLong(name, in string, maximum int64, value any) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(tooLongMessageNoIn, name, maximum) + } else { + msg = fmt.Sprintf(tooLongMessage, name, in, maximum) + } + return &Validation{ + code: TooLongFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// TooShort error for when a string is too short +func TooShort(name, in string, minimum int64, value any) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(tooShortMessageNoIn, name, minimum) + } else { + msg = fmt.Sprintf(tooShortMessage, name, in, minimum) + } + + return &Validation{ + code: TooShortFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// FailedPattern error for when a string fails a regex pattern match +// The pattern returned is the ECMA-syntax version of the pattern, not the Go version.
+func FailedPattern(name, in, pattern string, value any) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(patternFailNoIn, name, pattern) + } else { + msg = fmt.Sprintf(patternFail, name, in, pattern) + } + + return &Validation{ + code: PatternFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// MultipleOfMustBePositive error for when a +// multipleOf factor is negative +func MultipleOfMustBePositive(name, in string, factor any) *Validation { + return &Validation{ + code: MultipleOfMustBePositiveCode, + Name: name, + In: in, + Value: factor, + message: fmt.Sprintf(multipleOfMustBePositive, name, factor), + } +} diff --git a/vendor/github.com/go-openapi/jsonpointer/.editorconfig b/vendor/github.com/go-openapi/jsonpointer/.editorconfig new file mode 100644 index 00000000000..3152da69a5d --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore new file mode 100644 index 00000000000..769c244007b --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml new file mode 100644 index 00000000000..7cea1af8b52 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -0,0 +1,75 @@ +version: "2" +linters: + default: all + disable: + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - godot + - godox + - gosmopolitan + - inamedparam + #- intrange # disabled while < go1.22 + - ireturn + - lll + - musttag + - nestif + - nlreturn + - nonamedreturns + - noinlineerr + - paralleltest + - recvcheck + - testpackage + - thelper + - tparallel + - unparam + - varnamelen + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. 
+ # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonpointer/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md new file mode 100644 index 00000000000..45bd31b14fc --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -0,0 +1,26 @@ +# gojsonpointer [![Build Status](https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer.svg)](https://pkg.go.dev/github.com/go-openapi/jsonpointer) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonpointer)](https://goreportcard.com/report/github.com/go-openapi/jsonpointer) + +An implementation of JSON Pointer - Go language + +## Status +Completed YES + +Tested YES + +## References + + + +also known as [RFC6901](https://www.rfc-editor.org/rfc/rfc6901) + +### Note + +The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. + +That is because our implementation of the JSON pointer only supports explicit references to array elements: the provision in the spec +to resolve non-existent members as "the last element in the array", using the special trailing character "-". 
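For orientation, here is a minimal sketch of how the jsonpointer package vendored above is typically used; the JSON document and the pointer path are illustrative placeholders, not taken from this change:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	// A hypothetical document to resolve pointers against.
	var doc any
	if err := json.Unmarshal([]byte(`{"servers": [{"url": "https://example.com"}]}`), &doc); err != nil {
		panic(err)
	}

	// "/servers/0/url" walks the "servers" member, takes array element 0,
	// then reads its "url" member, per RFC 6901 evaluation rules.
	p, err := jsonpointer.New("/servers/0/url")
	if err != nil {
		panic(err)
	}

	value, kind, err := p.Get(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(value, kind) // https://example.com string
}
```

Keys containing "/" or "~" must be encoded as "~1" and "~0" before being joined into a pointer; the package's Escape and Unescape helpers (defined in pointer.go below) perform exactly that substitution.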
diff --git a/vendor/github.com/go-openapi/jsonpointer/errors.go b/vendor/github.com/go-openapi/jsonpointer/errors.go new file mode 100644 index 00000000000..b84343d9d74 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/errors.go @@ -0,0 +1,18 @@ +package jsonpointer + +type pointerError string + +func (e pointerError) Error() string { + return string(e) +} + +const ( + // ErrPointer is an error raised by the jsonpointer package + ErrPointer pointerError = "JSON pointer error" + + // ErrInvalidStart states that a JSON pointer must start with a separator ("/") + ErrInvalidStart pointerError = `JSON pointer must be empty or start with a "` + pointerSeparator + + // ErrUnsupportedValueType indicates that a value of the wrong type is being set + ErrUnsupportedValueType pointerError = "only structs, pointers, maps and slices are supported for setting values" +) diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go new file mode 100644 index 00000000000..7513c4763ba --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -0,0 +1,535 @@ +// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author sigu-399 +// author-github https://github.com/sigu-399 +// author-mail sigu.399@gmail.com +// +// repository-name jsonpointer +// repository-desc An implementation of JSON Pointer - Go language +// +// description Main and unique file. 
+// +// created 25-02-2013 + +package jsonpointer + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/go-openapi/swag/jsonname" +) + +const ( + emptyPointer = `` + pointerSeparator = `/` +) + +var ( + jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() + jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() +) + +// JSONPointable is an interface for structs to implement when they need to customize the +// json pointer process +type JSONPointable interface { + JSONLookup(string) (any, error) +} + +// JSONSetable is an interface for structs to implement when they need to customize the +// json pointer process +type JSONSetable interface { + JSONSet(string, any) error +} + +// Pointer is a representation of a json pointer +type Pointer struct { + referenceTokens []string +} + +// New creates a new json pointer for the given string +func New(jsonPointerString string) (Pointer, error) { + var p Pointer + err := p.parse(jsonPointerString) + + return p, err +} + +// Get uses the pointer to retrieve a value from a JSON document +func (p *Pointer) Get(document any) (any, reflect.Kind, error) { + return p.get(document, jsonname.DefaultJSONNameProvider) +} + +// Set uses the pointer to set a value in a JSON document +func (p *Pointer) Set(document any, value any) (any, error) { + return document, p.set(document, value, jsonname.DefaultJSONNameProvider) +} + +// DecodedTokens returns the decoded tokens of this JSON pointer +func (p *Pointer) DecodedTokens() []string { + result := make([]string, 0, len(p.referenceTokens)) + for _, t := range p.referenceTokens { + result = append(result, Unescape(t)) + } + return result +} + +// IsEmpty returns true if this is an empty json pointer +// this indicates that it points to the root document +func (p *Pointer) IsEmpty() bool { + return len(p.referenceTokens) == 0 +} + +// String returns the string representation of this JSON pointer +func (p *Pointer) String() string { + if len(p.referenceTokens) == 0 { + return emptyPointer + } + + return pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) +} + +func (p *Pointer) Offset(document string) (int64, error) { + dec := json.NewDecoder(strings.NewReader(document)) + var offset int64 + for _, ttk := range p.DecodedTokens() { + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + offset, err = offsetSingleObject(dec, ttk) + if err != nil { + return 0, err + } + case '[': + offset, err = offsetSingleArray(dec, ttk) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer) + } + default: + return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer) + } + } + return offset, nil +} + +// "Constructor", parses the given string JSON pointer +func (p *Pointer) parse(jsonPointerString string) error { + var err error + + if jsonPointerString != emptyPointer { + if !strings.HasPrefix(jsonPointerString, pointerSeparator) { + err = errors.Join(ErrInvalidStart, ErrPointer) + } else { + referenceTokens := strings.Split(jsonPointerString, pointerSeparator) + p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)
+ } + } + + return err +} + +func (p *Pointer) get(node any, nameProvider *jsonname.NameProvider) (any, reflect.Kind, error) { + if nameProvider == nil { + nameProvider = jsonname.DefaultJSONNameProvider + } + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + return node, kind, nil + } + + for _, token := range p.referenceTokens { + decodedToken := Unescape(token) + + r, knd, err := getSingleImpl(node, decodedToken, nameProvider) + if err != nil { + return nil, knd, err + } + node = r + } + + rValue := reflect.ValueOf(node) + kind = rValue.Kind() + + return node, kind, nil +} + +func (p *Pointer) set(node, data any, nameProvider *jsonname.NameProvider) error { + knd := reflect.ValueOf(node).Kind() + + if knd != reflect.Pointer && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { + return errors.Join( + ErrUnsupportedValueType, + ErrPointer, + ) + } + + if nameProvider == nil { + nameProvider = jsonname.DefaultJSONNameProvider + } + + // Full document when empty + if len(p.referenceTokens) == 0 { + return nil + } + + lastI := len(p.referenceTokens) - 1 + for i, token := range p.referenceTokens { + isLastToken := i == lastI + decodedToken := Unescape(token) + + if isLastToken { + + return setSingleImpl(node, data, decodedToken, nameProvider) + } + + // Check for nil during traversal + if isNil(node) { + return fmt.Errorf("cannot traverse through nil value at %q: %w", decodedToken, ErrPointer) + } + + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind := rValue.Kind() + + if rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) + if err != nil { + return err + } + fld := reflect.ValueOf(r) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Pointer { + node = fld.Addr().Interface() + continue + } + node = r + continue + } + + switch kind { //nolint:exhaustive + case reflect.Struct: + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer) + } + fld := rValue.FieldByName(nm) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Pointer { + node = fld.Addr().Interface() + continue + } + node = fld.Interface() + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + + if !mv.IsValid() { + return fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer) + } + if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Pointer { + node = mv.Addr().Interface() + continue + } + node = mv.Interface() + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer) + } + + elem := rValue.Index(tokenIndex) + if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Pointer { + node = elem.Addr().Interface() + continue + } + node = elem.Interface() + + default: + return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer) + } + } + + return nil +} + +func isNil(input 
any) bool { + if input == nil { + return true + } + + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Pointer, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + +// GetForToken gets a value for a json pointer token 1 level deep +func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) { + return getSingleImpl(document, decodedToken, jsonname.DefaultJSONNameProvider) +} + +// SetForToken sets a value for a json pointer token 1 level deep +func SetForToken(document any, decodedToken string, value any) (any, error) { + return document, setSingleImpl(document, value, decodedToken, jsonname.DefaultJSONNameProvider) +} + +func getSingleImpl(node any, decodedToken string, nameProvider *jsonname.NameProvider) (any, reflect.Kind, error) { + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind := rValue.Kind() + if isNil(node) { + return nil, kind, fmt.Errorf("nil value has no field %q: %w", decodedToken, ErrPointer) + } + + switch typed := node.(type) { + case JSONPointable: + r, err := typed.JSONLookup(decodedToken) + if err != nil { + return nil, kind, err + } + return r, kind, nil + case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect + return getSingleImpl(*typed, decodedToken, nameProvider) + } + + switch kind { //nolint:exhaustive + case reflect.Struct: + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return nil, kind, fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer) + } + fld := rValue.FieldByName(nm) + return fld.Interface(), kind, nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + + if mv.IsValid() { + return mv.Interface(), kind, nil + } + return nil, kind, fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer) + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return nil, kind, err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength-1, tokenIndex, ErrPointer) + } + + elem := rValue.Index(tokenIndex) + return elem.Interface(), kind, nil + + default: + return nil, kind, fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer) + } +} + +func setSingleImpl(node, data any, decodedToken string, nameProvider *jsonname.NameProvider) error { + rValue := reflect.Indirect(reflect.ValueOf(node)) + + // Check for nil to prevent panic when calling rValue.Type() + if isNil(node) { + return fmt.Errorf("cannot set field %q on nil value: %w", decodedToken, ErrPointer) + } + + if ns, ok := node.(JSONSetable); ok { // pointer impl + return ns.JSONSet(decodedToken, data) + } + + if rValue.Type().Implements(jsonSetableType) { + return node.(JSONSetable).JSONSet(decodedToken, data) + } + + switch rValue.Kind() { //nolint:exhaustive + case reflect.Struct: + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer) + } + fld := rValue.FieldByName(nm) + if fld.IsValid() { + fld.Set(reflect.ValueOf(data)) + } + return nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + rValue.SetMapIndex(kv, reflect.ValueOf(data)) + return nil + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return err + } + sLength := rValue.Len() + if
tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer) + } + + elem := rValue.Index(tokenIndex) + if !elem.CanSet() { + return fmt.Errorf("can't set slice index %s to %v: %w", decodedToken, data, ErrPointer) + } + elem.Set(reflect.ValueOf(data)) + return nil + + default: + return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer) + } +} + +func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { + for dec.More() { + offset := dec.InputOffset() + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err = drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err = drainSingle(dec); err != nil { + return 0, err + } + } + case string: + if tk == decodedToken { + return offset, nil + } + default: + return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer) + } + } + return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer) +} + +func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { + idx, err := strconv.Atoi(decodedToken) + if err != nil { + return 0, fmt.Errorf("token reference %q is not a number: %v: %w", decodedToken, err, ErrPointer) + } + var i int + for i = 0; i < idx && dec.More(); i++ { + tk, err := dec.Token() + if err != nil { + return 0, err + } + + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { + case '{': + if err = drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err = drainSingle(dec); err != nil { + return 0, err + } + } + } + } + + if !dec.More() { + return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer) + } + return dec.InputOffset(), nil +} + +// drainSingle drains a single level of object or array. +// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed. +func drainSingle(dec *json.Decoder) error { + for dec.More() { + tk, err := dec.Token() + if err != nil { + return err + } + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { + case '{': + if err = drainSingle(dec); err != nil { + return err + } + case '[': + if err = drainSingle(dec); err != nil { + return err + } + } + } + } + + // Consumes the ending delim + if _, err := dec.Token(); err != nil { + return err + } + return nil +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... 
and vice versa + +const ( + encRefTok0 = `~0` + encRefTok1 = `~1` + decRefTok0 = `~` + decRefTok1 = `/` +) + +var ( + encRefTokReplacer = strings.NewReplacer(encRefTok1, decRefTok1, encRefTok0, decRefTok0) + decRefTokReplacer = strings.NewReplacer(decRefTok1, encRefTok1, decRefTok0, encRefTok0) +) + +// Unescape unescapes a json pointer reference token string to the original representation +func Unescape(token string) string { + return encRefTokReplacer.Replace(token) +} + +// Escape escapes a pointer reference token string +func Escape(token string) string { + return decRefTokReplacer.Replace(token) +} diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore new file mode 100644 index 00000000000..769c244007b --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml new file mode 100644 index 00000000000..7cea1af8b52 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -0,0 +1,75 @@ +version: "2" +linters: + default: all + disable: + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - godot + - godox + - gosmopolitan + - inamedparam + #- intrange # disabled while < go1.22 + - ireturn + - lll + - musttag + - nestif + - nlreturn + - nonamedreturns + - noinlineerr + - paralleltest + - recvcheck + - testpackage + - thelper + - tparallel + - unparam + - varnamelen + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/LICENSE b/vendor/github.com/go-openapi/jsonreference/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/jsonreference/NOTICE b/vendor/github.com/go-openapi/jsonreference/NOTICE
new file mode 100644
index 00000000000..f9ad7e0f7a0
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/NOTICE
@@ -0,0 +1,38 @@
+Copyright 2015-2025 go-swagger maintainers
+
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+This software library, github.com/go-openapi/jsonreference, includes software developed
+by the go-swagger and go-openapi maintainers ("go-swagger maintainers").
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this software except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+This software is copied from, derived from, and inspired by other original software products.
+It ships with copies of other software whose license terms are recalled below.
+
+The original software was authored on 25-02-2013 by sigu-399 (https://github.com/sigu-399, sigu.399@gmail.com).
+
+github.com/sigu-399/jsonreference
+===========================
+
+// SPDX-FileCopyrightText: Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
+// SPDX-License-Identifier: Apache-2.0
+
+Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md
new file mode 100644
index 00000000000..2274a4b78fc
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/README.md
@@ -0,0 +1,26 @@
+# gojsonreference [![Build Status](https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference)
+
+[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE)
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonreference.svg)](https://pkg.go.dev/github.com/go-openapi/jsonreference)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonreference)](https://goreportcard.com/report/github.com/go-openapi/jsonreference)
+
+An implementation of JSON Reference - Go language
+
+## Status
+Feature complete. Stable API
+
+## Dependencies
+* https://github.com/go-openapi/jsonpointer
+
+## References
+
+* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
+
+## Licensing
+
+This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE).
+
+See the license [NOTICE](./NOTICE), which recalls the licensing terms of all the pieces of software
+on top of which it has been built.
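Usage sketch for the two packages vendored in this hunk, using only the APIs shown here (`jsonreference.New`, `GetURL`, `GetPointer`, `IsCanonical` from `reference.go` below, and the jsonpointer `Escape`/`Unescape` helpers at the top of this section). The reference string is hypothetical, and the expected outputs in the comments follow from the normalization rules in `internal/normalize_url.go`:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
	"github.com/go-openapi/jsonreference"
)

func main() {
	// A made-up reference. internal/normalize_url.go lowercases the scheme and
	// host, strips the default port, and collapses duplicate slashes.
	ref, err := jsonreference.New("HTTP://Example.com:80//specs//api.json#/definitions/a~1b")
	if err != nil {
		panic(err)
	}

	fmt.Println(ref.GetURL().String()) // expected: http://example.com/specs/api.json#/definitions/a~1b
	fmt.Println(ref.IsCanonical())     // expected: true (full URL with scheme and host)

	// The fragment is parsed as a JSON pointer; the token "a~1b" is the
	// escaped form of the object key "a/b" (RFC 6901: ~0 -> ~, ~1 -> /).
	fmt.Println(ref.GetPointer().String())    // expected: /definitions/a~1b
	fmt.Println(jsonpointer.Escape("a/b"))    // expected: a~1b
	fmt.Println(jsonpointer.Unescape("a~1b")) // expected: a/b
}
```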
diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go new file mode 100644 index 00000000000..ca79391dcf3 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -0,0 +1,72 @@ +// SPDX-FileCopyrightText: Copyright (c) 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package internal + +import ( + "net/url" + "regexp" + "strings" +) + +const ( + defaultHTTPPort = ":80" + defaultHTTPSPort = ":443" +) + +// Regular expressions used by the normalizations +var rxPort = regexp.MustCompile(`(:\d+)/?$`) +var rxDupSlashes = regexp.MustCompile(`/{2,}`) + +// NormalizeURL will normalize the specified URL +// This was added to replace a previous call to the no longer maintained purell library: +// The call that was used looked like the following: +// +// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) +// +// To explain all that was included in the call above, purell.FlagsSafe was really just the following: +// - FlagLowercaseScheme +// - FlagLowercaseHost +// - FlagRemoveDefaultPort +// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +// +// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment. +func NormalizeURL(u *url.URL) { + lowercaseScheme(u) + lowercaseHost(u) + removeDefaultPort(u) + removeDuplicateSlashes(u) + + u.RawPath = "" + u.RawFragment = "" +} + +func lowercaseScheme(u *url.URL) { + if len(u.Scheme) > 0 { + u.Scheme = strings.ToLower(u.Scheme) + } +} + +func lowercaseHost(u *url.URL) { + if len(u.Host) > 0 { + u.Host = strings.ToLower(u.Host) + } +} + +func removeDefaultPort(u *url.URL) { + if len(u.Host) > 0 { + scheme := strings.ToLower(u.Scheme) + u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { + if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) { + return "" + } + return val + }) + } +} + +func removeDuplicateSlashes(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") + } +} diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go new file mode 100644 index 00000000000..33d4798cad3 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -0,0 +1,135 @@ +// SPDX-FileCopyrightText: Copyright (c) 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package jsonreference + +import ( + "errors" + "net/url" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/jsonreference/internal" +) + +const ( + fragmentRune = `#` +) + +var ErrChildURL = errors.New("child url is nil") + +// Ref represents a json reference object +type Ref struct { + referenceURL *url.URL + referencePointer jsonpointer.Pointer + + HasFullURL bool + HasURLPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +// New creates a new reference for the given string +func New(jsonReferenceString string) (Ref, error) { + var r Ref + err := r.parse(jsonReferenceString) + return r, err +} + +// MustCreateRef parses the ref string and panics when it's invalid. 
+// Use the New method for a version that returns an error +func MustCreateRef(ref string) Ref { + r, err := New(ref) + if err != nil { + panic(err) + } + + return r +} + +// GetURL gets the URL for this reference +func (r *Ref) GetURL() *url.URL { + return r.referenceURL +} + +// GetPointer gets the json pointer for this reference +func (r *Ref) GetPointer() *jsonpointer.Pointer { + return &r.referencePointer +} + +// String returns the best version of the url for this reference +func (r *Ref) String() string { + if r.referenceURL != nil { + return r.referenceURL.String() + } + + if r.HasFragmentOnly { + return fragmentRune + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +// IsRoot returns true if this reference is a root document +func (r *Ref) IsRoot() bool { + return r.referenceURL != nil && + !r.IsCanonical() && + !r.HasURLPathOnly && + r.referenceURL.Fragment == "" +} + +// IsCanonical returns true when this pointer starts with http(s):// or file:// +func (r *Ref) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + childURL := child.GetURL() + parentURL := r.GetURL() + if childURL == nil { + return nil, ErrChildURL + } + if parentURL == nil { + return &child, nil + } + + ref, err := New(parentURL.ResolveReference(childURL).String()) + if err != nil { + return nil, err + } + return &ref, nil +} + +// "Constructor", parses the given string JSON reference +func (r *Ref) parse(jsonReferenceString string) error { + parsed, err := url.Parse(jsonReferenceString) + if err != nil { + return err + } + + internal.NormalizeURL(parsed) + + r.referenceURL = parsed + refURL := r.referenceURL + + if refURL.Scheme != "" && refURL.Host != "" { + r.HasFullURL = true + } else { + if refURL.Path != "" { + r.HasURLPathOnly = true + } else if refURL.RawQuery == "" && refURL.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refURL.Scheme == "file" + r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/") + + // invalid json-pointer error means url has no json-pointer fragment. 
simply ignore error + r.referencePointer, _ = jsonpointer.New(refURL.Fragment) + + return nil +} diff --git a/vendor/github.com/go-openapi/loads/.editorconfig b/vendor/github.com/go-openapi/loads/.editorconfig new file mode 100644 index 00000000000..3152da69a5d --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore new file mode 100644 index 00000000000..e4f15f17bfc --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.gitignore @@ -0,0 +1,4 @@ +secrets.yml +coverage.out +profile.cov +profile.out diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml new file mode 100644 index 00000000000..1ad5adf47e6 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.golangci.yml @@ -0,0 +1,75 @@ +version: "2" +linters: + default: all + disable: + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - godot + - godox + - gosmopolitan + - inamedparam + - intrange + - ireturn + - lll + - musttag + - nestif + - nlreturn + - nonamedreturns + - noinlineerr + - paralleltest + - recvcheck + - testpackage + - thelper + - tparallel + - unparam + - varnamelen + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. 
+ # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml new file mode 100644 index 00000000000..cd4a7c331bc --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.travis.yml @@ -0,0 +1,25 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.16.x +- 1.x +install: +- go get gotest.tools/gotestsum +language: go +arch: +- amd64 +- ppc64le +jobs: + include: + # include linting job, but only for latest go version and amd64 arch + - go: 1.x + arch: amd64 + install: + go get github.com/golangci/golangci-lint/cmd/golangci-lint + script: + - golangci-lint run --new-from-rev master +notifications: + slack: + secure: OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= +script: +- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/go-openapi/loads/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
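Usage sketch for the loads package vendored below, relying only on the exported APIs visible in these files (`loads.Spec`, `Document.Expanded`, `NewDocLoaderWithMatch`, `WithDocLoaderMatches`, `JSONDoc`); the spec paths are hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/go-openapi/loads"
)

func main() {
	// Hypothetical local spec; loads.Spec also accepts remote URLs and
	// auto-detects YAML vs. JSON via the default chain built in loaders.go.
	doc, err := loads.Spec("./swagger.yaml")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(doc.Version(), doc.BasePath(), doc.Host())

	// Expanded resolves $ref entries and returns a new, self-contained Document.
	if _, err := doc.Expanded(); err != nil {
		log.Fatal(err)
	}

	// The loader chain can be replaced per call, e.g. to accept JSON files only.
	jsonOnly := loads.NewDocLoaderWithMatch(loads.JSONDoc, func(path string) bool {
		return strings.HasSuffix(path, ".json")
	})
	if _, err := loads.Spec("./swagger.json", loads.WithDocLoaderMatches(jsonOnly)); err != nil {
		log.Fatal(err)
	}
}
```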
diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md
new file mode 100644
index 00000000000..1f0174f2d91
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/README.md
@@ -0,0 +1,32 @@
+# Loads OAI specs [![Build Status](https://github.com/go-openapi/loads/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads)
+
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads)
+
+Loading of OAI v2 API specification documents from local or remote locations. Supports JSON and YAML documents.
+
+Primary usage:
+
+```go
+  import (
+    "github.com/go-openapi/loads"
+  )
+
+  ...
+
+  // loads a YAML spec from an HTTP URL
+  doc, err := loads.Spec(ts.URL)
+
+  ...
+
+  // retrieves the object model for the API specification
+  spec := doc.Spec()
+
+  ...
+```
+
+See also the provided [examples](https://pkg.go.dev/github.com/go-openapi/loads#pkg-examples).
+
+## Licensing
+
+This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE).
diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go
new file mode 100644
index 00000000000..7981e70e9f1
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/doc.go
@@ -0,0 +1,7 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+// Package loads provides document loading methods for swagger (OAI v2) API specifications.
+//
+// It is used by other go-openapi packages to load and run analysis on local or remote spec documents.
+package loads
diff --git a/vendor/github.com/go-openapi/loads/errors.go b/vendor/github.com/go-openapi/loads/errors.go
new file mode 100644
index 00000000000..8f2d602f5cc
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/errors.go
@@ -0,0 +1,18 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package loads
+
+type loaderError string
+
+func (e loaderError) Error() string {
+	return string(e)
+}
+
+const (
+	// ErrLoads is an error returned by the loads package
+	ErrLoads loaderError = "loads error"
+
+	// ErrNoLoader indicates that no configured loader matched the input
+	ErrNoLoader loaderError = "no loader matched"
+)
diff --git a/vendor/github.com/go-openapi/loads/loaders.go b/vendor/github.com/go-openapi/loads/loaders.go
new file mode 100644
index 00000000000..25b157302e4
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/loaders.go
@@ -0,0 +1,155 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package loads
+
+import (
+	"encoding/json"
+	"errors"
+	"net/url"
+	"slices"
+
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/swag/loading"
+)
+
+var (
+	// Default chain of loaders, defined at the package level.
+	//
+	// By default this matches json and yaml documents.
+	//
+	// May be altered with AddLoader().
+	loaders *loader
+)
+
+func init() {
+	jsonLoader := &loader{
+		DocLoaderWithMatch: DocLoaderWithMatch{
+			Match: func(_ string) bool {
+				return true
+			},
+			Fn: JSONDoc,
+		},
+	}
+
+	loaders = jsonLoader.WithHead(&loader{
+		DocLoaderWithMatch: DocLoaderWithMatch{
+			Match: loading.YAMLMatcher,
+			Fn:    loading.YAMLDoc,
+		},
+	})
+
+	// sets the global default loader for go-openapi/spec
+	spec.PathLoader = loaders.Load
+}
+
+// DocLoader represents a doc loader type
+type DocLoader func(string, ...loading.Option) (json.RawMessage, error)
+
+// DocMatcher represents a predicate to check if a loader matches
+type DocMatcher func(string) bool
+
+// DocLoaderWithMatch describes a loading function for a given extension match.
+type DocLoaderWithMatch struct {
+	Fn    DocLoader
+	Match DocMatcher
+}
+
+// NewDocLoaderWithMatch builds a DocLoaderWithMatch to be used in load options
+func NewDocLoaderWithMatch(fn DocLoader, matcher DocMatcher) DocLoaderWithMatch {
+	return DocLoaderWithMatch{
+		Fn:    fn,
+		Match: matcher,
+	}
+}
+
+type loader struct {
+	DocLoaderWithMatch
+
+	loadingOptions []loading.Option
+
+	Next *loader
+}
+
+// WithHead adds a loader at the head of the current stack
+func (l *loader) WithHead(head *loader) *loader {
+	if head == nil {
+		return l
+	}
+	head.Next = l
+	return head
+}
+
+// WithNext adds a loader at the trail of the current stack
+func (l *loader) WithNext(next *loader) *loader {
+	l.Next = next
+	return next
+}
+
+// Load the raw document from path
+func (l *loader) Load(path string) (json.RawMessage, error) {
+	_, erp := url.Parse(path)
+	if erp != nil {
+		return nil, errors.Join(erp, ErrLoads)
+	}
+
+	var lastErr error = ErrNoLoader // default error if no match was found
+	for ldr := l; ldr != nil; ldr = ldr.Next {
+		if ldr.Match != nil && !ldr.Match(path) {
+			continue
+		}
+
+		// try then move to next one if there is an error
+		b, err := ldr.Fn(path, l.loadingOptions...)
+		if err == nil {
+			return b, nil
+		}
+
+		lastErr = err
+	}
+
+	return nil, errors.Join(lastErr, ErrLoads)
+}
+
+func (l *loader) clone() *loader {
+	if l == nil {
+		return nil
+	}
+
+	return &loader{
+		DocLoaderWithMatch: l.DocLoaderWithMatch,
+		loadingOptions:     slices.Clone(l.loadingOptions),
+		Next:               l.Next.clone(),
+	}
+}
+
+// JSONDoc loads a json document from either a file or a remote url.
+//
+// See [loading.Option] for available options (e.g. configuring authentication,
+// headers or using embedded file system resources).
+func JSONDoc(path string, opts ...loading.Option) (json.RawMessage, error) {
+	data, err := loading.LoadFromFileOrHTTP(path, opts...)
+	if err != nil {
+		return nil, errors.Join(err, ErrLoads)
+	}
+	return json.RawMessage(data), nil
+}
+
+// AddLoader for a document, executed before other previously set loaders.
+//
+// This sets the configuration at the package level.
+//
+// NOTE:
+//   - this updates the default loader used by github.com/go-openapi/spec
+//   - since this sets package level globals, you shouldn't call this concurrently
+func AddLoader(predicate DocMatcher, load DocLoader) {
+	loaders = loaders.WithHead(&loader{
+		DocLoaderWithMatch: DocLoaderWithMatch{
+			Match: predicate,
+			Fn:    load,
+		},
+	})
+
+	// sets the global default loader for go-openapi/spec
+	spec.PathLoader = loaders.Load
+}
diff --git a/vendor/github.com/go-openapi/loads/options.go b/vendor/github.com/go-openapi/loads/options.go
new file mode 100644
index 00000000000..adb5e6d15b0
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/options.go
@@ -0,0 +1,77 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package loads
+
+import "github.com/go-openapi/swag/loading"
+
+type options struct {
+	loader         *loader
+	loadingOptions []loading.Option
+}
+
+func defaultOptions() *options {
+	return &options{
+		loader: loaders,
+	}
+}
+
+func loaderFromOptions(options []LoaderOption) *loader {
+	opts := defaultOptions()
+	for _, apply := range options {
+		apply(opts)
+	}
+
+	l := opts.loader.clone()
+	l.loadingOptions = opts.loadingOptions
+
+	return l
+}
+
+// LoaderOption allows fine-tuning of the spec loader behavior
+type LoaderOption func(*options)
+
+// WithDocLoader sets a custom loader for loading specs
+func WithDocLoader(l DocLoader) LoaderOption {
+	return func(opt *options) {
+		if l == nil {
+			return
+		}
+		opt.loader = &loader{
+			DocLoaderWithMatch: DocLoaderWithMatch{
+				Fn: l,
+			},
+		}
+	}
+}
+
+// WithDocLoaderMatches sets a chain of custom loaders for loading specs
+// for different extension matches.
+//
+// Loaders are executed in the order of provided DocLoaderWithMatch'es.
+func WithDocLoaderMatches(l ...DocLoaderWithMatch) LoaderOption {
+	return func(opt *options) {
+		var final, prev *loader
+		for _, ldr := range l {
+			if ldr.Fn == nil {
+				continue
+			}
+
+			if prev == nil {
+				final = &loader{DocLoaderWithMatch: ldr}
+				prev = final
+				continue
+			}
+
+			prev = prev.WithNext(&loader{DocLoaderWithMatch: ldr})
+		}
+		opt.loader = final
+	}
+}
+
+// WithLoadingOptions sets some [loading.Option] to be applied when calling a registered loader.
+func WithLoadingOptions(loadingOptions ...loading.Option) LoaderOption {
+	return func(opt *options) {
+		opt.loadingOptions = loadingOptions
+	}
+}
diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go
new file mode 100644
index 00000000000..213c40c657a
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/spec.go
@@ -0,0 +1,276 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package loads
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"maps"
+
+	"github.com/go-openapi/analysis"
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/swag/yamlutils"
+)
+
+func init() {
+	gob.Register(map[string]any{})
+	gob.Register([]any{})
+}
+
+// Document represents a swagger spec document
+type Document struct {
+	// specAnalyzer
+	Analyzer     *analysis.Spec
+	spec         *spec.Swagger
+	specFilePath string
+	origSpec     *spec.Swagger
+	schema       *spec.Schema
+	pathLoader   *loader
+	raw          json.RawMessage
+}
+
+// JSONSpec loads a spec from a json document, using the [JSONDoc] loader.
+//
+// A set of [loading.Option] may be passed to this loader using [WithLoadingOptions].
+func JSONSpec(path string, opts ...LoaderOption) (*Document, error) { + var o options + for _, apply := range opts { + apply(&o) + } + + data, err := JSONDoc(path, o.loadingOptions...) + if err != nil { + return nil, err + } + // convert to json + doc, err := Analyzed(data, "", opts...) + if err != nil { + return nil, err + } + + doc.specFilePath = path + + return doc, nil +} + +// Embedded returns a Document based on embedded specs (i.e. as a raw [json.RawMessage]). No analysis is required +func Embedded(orig, flat json.RawMessage, opts ...LoaderOption) (*Document, error) { + var origSpec, flatSpec spec.Swagger + if err := json.Unmarshal(orig, &origSpec); err != nil { + return nil, err + } + if err := json.Unmarshal(flat, &flatSpec); err != nil { + return nil, err + } + return &Document{ + raw: orig, + origSpec: &origSpec, + spec: &flatSpec, + pathLoader: loaderFromOptions(opts), + }, nil +} + +// Spec loads a new spec document from a local or remote path. +// +// By default it uses a JSON or YAML loader, with auto-detection based on the resource extension. +func Spec(path string, opts ...LoaderOption) (*Document, error) { + ldr := loaderFromOptions(opts) + + b, err := ldr.Load(path) + if err != nil { + return nil, err + } + + document, err := Analyzed(b, "", opts...) + if err != nil { + return nil, err + } + + document.specFilePath = path + document.pathLoader = ldr + + return document, nil +} + +// Analyzed creates a new analyzed spec document for a root json.RawMessage. +func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*Document, error) { + if version == "" { + version = "2.0" + } + if version != "2.0" { + return nil, fmt.Errorf("spec version %q is not supported: %w", version, ErrLoads) + } + + raw, err := trimData(data) // trim blanks, then convert yaml docs into json + if err != nil { + return nil, err + } + + swspec := new(spec.Swagger) + if err = json.Unmarshal(raw, swspec); err != nil { + return nil, errors.Join(err, ErrLoads) + } + + origsqspec, err := cloneSpec(swspec) + if err != nil { + return nil, errors.Join(err, ErrLoads) + } + + d := &Document{ + Analyzer: analysis.New(swspec), // NOTE: at this moment, analysis does not follow $refs to documents outside the root doc + schema: spec.MustLoadSwagger20Schema(), + spec: swspec, + raw: raw, + origSpec: origsqspec, + pathLoader: loaderFromOptions(options), + } + + return d, nil +} + +func trimData(in json.RawMessage) (json.RawMessage, error) { + trimmed := bytes.TrimSpace(in) + if len(trimmed) == 0 { + return in, nil + } + + if trimmed[0] == '{' || trimmed[0] == '[' { + return trimmed, nil + } + + // assume yaml doc: convert it to json + yml, err := yamlutils.BytesToYAMLDoc(trimmed) + if err != nil { + return nil, fmt.Errorf("analyzed: %v: %w", err, ErrLoads) + } + + d, err := yamlutils.YAMLToJSON(yml) + if err != nil { + return nil, fmt.Errorf("analyzed: %v: %w", err, ErrLoads) + } + + return d, nil +} + +// Expanded expands the $ref fields in the spec [Document] and returns a new expanded [Document] +func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { + swspec := new(spec.Swagger) + if err := json.Unmarshal(d.raw, swspec); err != nil { + return nil, err + } + + var expandOptions *spec.ExpandOptions + if len(options) > 0 { + expandOptions = options[0] + if expandOptions.RelativeBase == "" { + expandOptions.RelativeBase = d.specFilePath + } + } else { + expandOptions = &spec.ExpandOptions{ + RelativeBase: d.specFilePath, + } + } + + if expandOptions.PathLoader == nil { + 
if d.pathLoader != nil { + // use loader from Document options + expandOptions.PathLoader = d.pathLoader.Load + } else { + // use package level loader + expandOptions.PathLoader = loaders.Load + } + } + + if err := spec.ExpandSpec(swspec, expandOptions); err != nil { + return nil, err + } + + dd := &Document{ + Analyzer: analysis.New(swspec), + spec: swspec, + specFilePath: d.specFilePath, + schema: spec.MustLoadSwagger20Schema(), + raw: d.raw, + origSpec: d.origSpec, + } + return dd, nil +} + +// BasePath the base path for the API specified by this spec +func (d *Document) BasePath() string { + if d.spec == nil { + return "" + } + return d.spec.BasePath +} + +// Version returns the OpenAPI version of this spec (e.g. 2.0) +func (d *Document) Version() string { + return d.spec.Swagger +} + +// Schema returns the swagger 2.0 meta-schema +func (d *Document) Schema() *spec.Schema { + return d.schema +} + +// Spec returns the swagger object model for this API specification +func (d *Document) Spec() *spec.Swagger { + return d.spec +} + +// Host returns the host for the API +func (d *Document) Host() string { + return d.spec.Host +} + +// Raw returns the raw swagger spec as json bytes +func (d *Document) Raw() json.RawMessage { + return d.raw +} + +// OrigSpec yields the original spec +func (d *Document) OrigSpec() *spec.Swagger { + return d.origSpec +} + +// ResetDefinitions yields a shallow copy with the models reset to the original spec +func (d *Document) ResetDefinitions() *Document { + d.spec.Definitions = make(map[string]spec.Schema, len(d.origSpec.Definitions)) + maps.Copy(d.spec.Definitions, d.origSpec.Definitions) + + return d +} + +// Pristine creates a new pristine document instance based on the input data +func (d *Document) Pristine() *Document { + raw, _ := json.Marshal(d.Spec()) + dd, _ := Analyzed(raw, d.Version()) + dd.pathLoader = d.pathLoader + dd.specFilePath = d.specFilePath + + return dd +} + +// SpecFilePath returns the file path of the spec if one is defined +func (d *Document) SpecFilePath() string { + return d.specFilePath +} + +func cloneSpec(src *spec.Swagger) (*spec.Swagger, error) { + var b bytes.Buffer + if err := gob.NewEncoder(&b).Encode(src); err != nil { + return nil, err + } + + var dst spec.Swagger + if err := gob.NewDecoder(&b).Decode(&dst); err != nil { + return nil, err + } + + return &dst, nil +} diff --git a/vendor/github.com/go-openapi/runtime/.editorconfig b/vendor/github.com/go-openapi/runtime/.editorconfig new file mode 100644 index 00000000000..3152da69a5d --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/runtime/.gitattributes b/vendor/github.com/go-openapi/runtime/.gitattributes new file mode 100644 index 00000000000..d207b1802b2 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf diff --git a/vendor/github.com/go-openapi/runtime/.gitignore 
b/vendor/github.com/go-openapi/runtime/.gitignore new file mode 100644 index 00000000000..fea8b84eca9 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +*.cov +*.out +playground diff --git a/vendor/github.com/go-openapi/runtime/.golangci.yml b/vendor/github.com/go-openapi/runtime/.golangci.yml new file mode 100644 index 00000000000..0087ed31130 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.golangci.yml @@ -0,0 +1,77 @@ +version: "2" +linters: + default: all + disable: + - cyclop + - depguard + - err113 # disabled temporarily: there are just too many issues to address + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - godot + - godox + - gomoddirectives # moved to mono-repo, multi-modules, so replace directives are needed + - gosmopolitan + - inamedparam + - ireturn + - lll + - musttag + - nestif + - nilerr # nilerr crashes on this repo + - nlreturn + - noinlineerr + - nonamedreturns + - paralleltest + - recvcheck + - testpackage + - thelper + - tparallel + - unparam + - varnamelen + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/runtime/LICENSE b/vendor/github.com/go-openapi/runtime/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
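The `cloneSpec` helper near the top of this section deep-copies a `spec.Swagger` by round-tripping it through `encoding/gob`, which duplicates nested maps and slices instead of sharing them. A minimal, self-contained sketch of that pattern, using a stand-in type rather than the real `spec.Swagger`:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// swaggerLike is an illustrative stand-in for spec.Swagger; it is not part
// of the vendored code.
type swaggerLike struct {
	Host        string
	Definitions map[string]string
}

// deepCopy mirrors cloneSpec: encode into a buffer, then decode into a
// fresh value, so the copy shares no mutable state with the source.
func deepCopy(src *swaggerLike) (*swaggerLike, error) {
	var b bytes.Buffer
	if err := gob.NewEncoder(&b).Encode(src); err != nil {
		return nil, err
	}
	var dst swaggerLike
	if err := gob.NewDecoder(&b).Decode(&dst); err != nil {
		return nil, err
	}
	return &dst, nil
}

func main() {
	orig := &swaggerLike{Host: "example.com", Definitions: map[string]string{"Pet": "object"}}
	dup, err := deepCopy(orig)
	if err != nil {
		panic(err)
	}
	dup.Definitions["Pet"] = "mutated"
	fmt.Println(orig.Definitions["Pet"], dup.Definitions["Pet"]) // object mutated
}
```

This independence is also what lets `ResetDefinitions` restore the original models: `origSpec` was captured as a separate copy, so later mutations of `spec.Definitions` never reach it.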
diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md new file mode 100644 index 00000000000..9e15b1adb5b --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/README.md @@ -0,0 +1,43 @@ +# runtime [![Build Status](https://github.com/go-openapi/runtime/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/runtime/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/runtime.svg)](https://pkg.go.dev/github.com/go-openapi/runtime) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/runtime)](https://goreportcard.com/report/github.com/go-openapi/runtime) + +# go OpenAPI toolkit runtime + +The runtime component, for use in code generation or for untyped usage. + +## Release notes + +### v0.29.0 + +**New with this release**: + +* upgraded to `go1.24` and modernized the code base accordingly +* updated all dependencies, and removed a noticeable indirect dependency (e.g. `mailru/easyjson`) +* **breaking change** no longer imports `opentracing-go` (#365). + * the `WithOpentracing()` method now returns an opentelemetry transport + * for users who can't transition to opentelemetry, the previous behavior + of `WithOpentracing` delivering an opentracing transport is provided by a separate + module `github.com/go-openapi/runtime/client-middleware/opentracing`. +* removed the direct dependency on `gopkg.in/yaml.v3`, in favor of `go.yaml.in/yaml/v3` (an indirect + test dependency on the older package is still around) +* technically, the repo has evolved into a mono-repo, multi-module structure (2 Go modules + published), with CI adapted accordingly + +**What's coming next?** + +Moving forward, we want to: + +* [ ] continue narrowing down the scope of dependencies: + * yaml support in an independent module + * introduce more up-to-date support for opentelemetry as a separate module that evolves + independently from the main package (to avoid breaking changes, the existing API + will remain maintained, but evolve at a slower pace than opentelemetry). +* [ ] fix a few known issues with some file upload requests (e.g. #286) + +## Licensing + +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go new file mode 100644 index 00000000000..eb649742e8e --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/bytestream.go @@ -0,0 +1,211 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag/jsonutils" +) + +func defaultCloser() error { return nil } + +type byteStreamOpt func(opts *byteStreamOpts) + +// ClosesStream when the bytestream consumer or producer is finished +func ClosesStream(opts *byteStreamOpts) { + opts.Close = true +} + +type byteStreamOpts struct { + Close bool +} + +// ByteStreamConsumer creates a consumer for byte streams.
+// +// The consumer consumes from a provided reader into the data passed by reference. +// +// Supported output underlying types and interfaces, prioritized in this order: +// - io.ReaderFrom (for maximum control) +// - io.Writer (performs io.Copy) +// - encoding.BinaryUnmarshaler +// - *string +// - *[]byte +func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { + var vals byteStreamOpts + for _, opt := range opts { + opt(&vals) + } + + return ConsumerFunc(func(reader io.Reader, data any) error { + if reader == nil { + return errors.New("ByteStreamConsumer requires a reader") // early exit + } + if data == nil { + return errors.New("nil destination for ByteStreamConsumer") + } + + closer := defaultCloser + if vals.Close { + if cl, isReaderCloser := reader.(io.Closer); isReaderCloser { + closer = cl.Close + } + } + defer func() { + _ = closer() + }() + + if readerFrom, isReaderFrom := data.(io.ReaderFrom); isReaderFrom { + _, err := readerFrom.ReadFrom(reader) + return err + } + + if writer, isDataWriter := data.(io.Writer); isDataWriter { + _, err := io.Copy(writer, reader) + return err + } + + // buffers input before writing to data + var buf bytes.Buffer + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + switch destinationPointer := data.(type) { + case encoding.BinaryUnmarshaler: + return destinationPointer.UnmarshalBinary(b) + case *any: + switch (*destinationPointer).(type) { + case string: + *destinationPointer = string(b) + + return nil + + case []byte: + *destinationPointer = b + + return nil + } + default: + // check for the underlying type to be pointer to []byte or string, + if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr { + return errors.New("destination must be a pointer") + } + + v := reflect.Indirect(reflect.ValueOf(data)) + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + v.SetBytes(b) + return nil + + case t.Kind() == reflect.String: + v.SetString(string(b)) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the ByteStreamConsumer, %s", + data, data, "can be resolved by supporting Writer/BinaryUnmarshaler interface") + }) +} + +// ByteStreamProducer creates a producer for byte streams. +// +// The producer takes input data then writes to an output writer (essentially as a pipe). +// +// Supported input underlying types and interfaces, prioritized in this order: +// - io.WriterTo (for maximum control) +// - io.Reader (performs io.Copy). A ReadCloser is closed before exiting. 
+// - encoding.BinaryMarshaler +// - error (writes as a string) +// - []byte +// - string +// - struct, other slices: writes as JSON +func ByteStreamProducer(opts ...byteStreamOpt) Producer { + var vals byteStreamOpts + for _, opt := range opts { + opt(&vals) + } + + return ProducerFunc(func(writer io.Writer, data any) error { + if writer == nil { + return errors.New("ByteStreamProducer requires a writer") // early exit + } + if data == nil { + return errors.New("nil data for ByteStreamProducer") + } + + closer := defaultCloser + if vals.Close { + if cl, isWriterCloser := writer.(io.Closer); isWriterCloser { + closer = cl.Close + } + } + defer func() { + _ = closer() + }() + + if rc, isDataCloser := data.(io.ReadCloser); isDataCloser { + defer rc.Close() + } + + switch origin := data.(type) { + case io.WriterTo: + _, err := origin.WriteTo(writer) + return err + + case io.Reader: + _, err := io.Copy(writer, origin) + return err + + case encoding.BinaryMarshaler: + bytes, err := origin.MarshalBinary() + if err != nil { + return err + } + + _, err = writer.Write(bytes) + return err + + case error: + _, err := writer.Write([]byte(origin.Error())) + return err + + default: + v := reflect.Indirect(reflect.ValueOf(data)) + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + _, err := writer.Write(v.Bytes()) + return err + + case t.Kind() == reflect.String: + _, err := writer.Write([]byte(v.String())) + return err + + case t.Kind() == reflect.Struct || t.Kind() == reflect.Slice: + b, err := jsonutils.WriteJSON(data) + if err != nil { + return err + } + + _, err = writer.Write(b) + return err + } + } + + return fmt.Errorf("%v (%T) is not supported by the ByteStreamProducer, %s", + data, data, "can be resolved by supporting Reader/BinaryMarshaler interface") + }) +} diff --git a/vendor/github.com/go-openapi/runtime/client/auth_info.go b/vendor/github.com/go-openapi/runtime/client/auth_info.go new file mode 100644 index 00000000000..a98690c4d60 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/auth_info.go @@ -0,0 +1,66 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "encoding/base64" + + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" +) + +// PassThroughAuth never manipulates the request +var PassThroughAuth runtime.ClientAuthInfoWriter + +func init() { + PassThroughAuth = runtime.ClientAuthInfoWriterFunc(func(_ runtime.ClientRequest, _ strfmt.Registry) error { return nil }) +} + +// BasicAuth provides a basic auth info writer +func BasicAuth(username, password string) runtime.ClientAuthInfoWriter { + return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { + encoded := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) + return r.SetHeaderParam(runtime.HeaderAuthorization, "Basic "+encoded) + }) +} + +// APIKeyAuth provides an API key auth info writer +func APIKeyAuth(name, in, value string) runtime.ClientAuthInfoWriter { + if in == "query" { + return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { + return r.SetQueryParam(name, value) + }) + } + + if in == "header" { + return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { + return r.SetHeaderParam(name, value) + }) + } + return nil +} + +// BearerToken provides a header based oauth2 bearer access token auth info writer +func 
BearerToken(token string) runtime.ClientAuthInfoWriter { + return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { + return r.SetHeaderParam(runtime.HeaderAuthorization, "Bearer "+token) + }) +} + +// Compose combines multiple ClientAuthInfoWriters into a single one. +// Useful when multiple auth headers are needed. +func Compose(auths ...runtime.ClientAuthInfoWriter) runtime.ClientAuthInfoWriter { + return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { + for _, auth := range auths { + if auth == nil { + continue + } + if err := auth.AuthenticateRequest(r, nil); err != nil { + return err + } + } + return nil + }) +} diff --git a/vendor/github.com/go-openapi/runtime/client/keepalive.go b/vendor/github.com/go-openapi/runtime/client/keepalive.go new file mode 100644 index 00000000000..831d23b511d --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/keepalive.go @@ -0,0 +1,57 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "io" + "net/http" + "sync/atomic" +) + +// KeepAliveTransport drains the remaining body from a response +// so that Go will reuse the TCP connections. +// This is not enabled by default because there are servers where +// the response never gets closed and that would make the code hang forever. +// So instead it's provided as an http client middleware that can be used to override +// any request. +func KeepAliveTransport(rt http.RoundTripper) http.RoundTripper { + return &keepAliveTransport{wrapped: rt} +} + +type keepAliveTransport struct { + wrapped http.RoundTripper +} + +func (k *keepAliveTransport) RoundTrip(r *http.Request) (*http.Response, error) { + resp, err := k.wrapped.RoundTrip(r) + if err != nil { + return resp, err + } + resp.Body = &drainingReadCloser{rdr: resp.Body} + return resp, nil +} + +type drainingReadCloser struct { + rdr io.ReadCloser + seenEOF uint32 +} + +func (d *drainingReadCloser) Read(p []byte) (n int, err error) { + n, err = d.rdr.Read(p) + if err == io.EOF || n == 0 { + atomic.StoreUint32(&d.seenEOF, 1) + } + return +} + +func (d *drainingReadCloser) Close() error { + // drain buffer + if atomic.LoadUint32(&d.seenEOF) != 1 { + // If the reader side (an HTTP server) is misbehaving, it still may send + // some bytes, but the closer ignores them to keep the underlying + // connection open.
+ _, _ = io.Copy(io.Discard, d.rdr) + } + return d.rdr.Close() +} diff --git a/vendor/github.com/go-openapi/runtime/client/opentelemetry.go b/vendor/github.com/go-openapi/runtime/client/opentelemetry.go new file mode 100644 index 00000000000..e77941293f9 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/opentelemetry.go @@ -0,0 +1,216 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "fmt" + "net/http" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/trace" +) + +const ( + instrumentationVersion = "1.0.0" + tracerName = "go-openapi" +) + +type config struct { + Tracer trace.Tracer + Propagator propagation.TextMapPropagator + SpanStartOptions []trace.SpanStartOption + SpanNameFormatter func(*runtime.ClientOperation) string + TracerProvider trace.TracerProvider +} + +type OpenTelemetryOpt interface { + apply(*config) +} + +type optionFunc func(*config) + +func (o optionFunc) apply(c *config) { + o(c) +} + +// WithTracerProvider specifies a tracer provider to use for creating a tracer. +// If none is specified, the global provider is used. +func WithTracerProvider(provider trace.TracerProvider) OpenTelemetryOpt { + return optionFunc(func(c *config) { + if provider != nil { + c.TracerProvider = provider + } + }) +} + +// WithPropagators configures specific propagators. If this +// option isn't specified, then the global TextMapPropagator is used. +func WithPropagators(ps propagation.TextMapPropagator) OpenTelemetryOpt { + return optionFunc(func(c *config) { + if ps != nil { + c.Propagator = ps + } + }) +} + +// WithSpanOptions configures an additional set of +// trace.SpanOptions, which are applied to each new span. +func WithSpanOptions(opts ...trace.SpanStartOption) OpenTelemetryOpt { + return optionFunc(func(c *config) { + c.SpanStartOptions = append(c.SpanStartOptions, opts...) + }) +} + +// WithSpanNameFormatter takes a function that will be called on every +// request and the returned string will become the Span Name. +func WithSpanNameFormatter(f func(op *runtime.ClientOperation) string) OpenTelemetryOpt { + return optionFunc(func(c *config) { + c.SpanNameFormatter = f + }) +} + +func defaultTransportFormatter(op *runtime.ClientOperation) string { + if op.ID != "" { + return op.ID + } + + return fmt.Sprintf("%s_%s", strings.ToLower(op.Method), op.PathPattern) +} + +type openTelemetryTransport struct { + transport runtime.ClientTransport + host string + tracer trace.Tracer + config *config +} + +func newOpenTelemetryTransport(transport runtime.ClientTransport, host string, opts []OpenTelemetryOpt) *openTelemetryTransport { + tr := &openTelemetryTransport{ + transport: transport, + host: host, + } + + defaultOpts := []OpenTelemetryOpt{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)), + WithSpanNameFormatter(defaultTransportFormatter), + WithPropagators(otel.GetTextMapPropagator()), + WithTracerProvider(otel.GetTracerProvider()), + } + + c := newConfig(append(defaultOpts, opts...)...) 
+ tr.config = c + + return tr +} + +func (t *openTelemetryTransport) Submit(op *runtime.ClientOperation) (any, error) { + if op.Context == nil { + return t.transport.Submit(op) + } + + params := op.Params + reader := op.Reader + + var span trace.Span + defer func() { + if span != nil { + span.End() + } + }() + + op.Params = runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error { + span = t.newOpenTelemetrySpan(op, req.GetHeaderParams()) + return params.WriteToRequest(req, reg) + }) + + op.Reader = runtime.ClientResponseReaderFunc(func(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + if span != nil { + statusCode := response.Code() + // NOTE: this is replaced by semconv.HTTPResponseStatusCode in semconv v1.21 + span.SetAttributes(semconv.HTTPResponseStatusCode(statusCode)) + // NOTE: the conversion from HTTP status code to trace code is no longer available with + // semconv v1.21 + const minHTTPStatusIsError = 400 + if statusCode >= minHTTPStatusIsError { + span.SetStatus(codes.Error, http.StatusText(statusCode)) + } + } + + return reader.ReadResponse(response, consumer) + }) + + submit, err := t.transport.Submit(op) + if err != nil && span != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } + + return submit, err +} + +func (t *openTelemetryTransport) newOpenTelemetrySpan(op *runtime.ClientOperation, header http.Header) trace.Span { + ctx := op.Context + + tracer := t.tracer + if tracer == nil { + if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { + tracer = newTracer(span.TracerProvider()) + } else { + tracer = newTracer(otel.GetTracerProvider()) + } + } + + ctx, span := tracer.Start(ctx, t.config.SpanNameFormatter(op), t.config.SpanStartOptions...) + + var scheme string + if len(op.Schemes) > 0 { + scheme = op.Schemes[0] + } + + span.SetAttributes( + attribute.String("net.peer.name", t.host), + attribute.String(string(semconv.HTTPRouteKey), op.PathPattern), + attribute.String(string(semconv.HTTPRequestMethodKey), op.Method), + attribute.String("span.kind", trace.SpanKindClient.String()), + attribute.String("http.scheme", scheme), + ) + + carrier := propagation.HeaderCarrier(header) + t.config.Propagator.Inject(ctx, carrier) + + return span +} + +func newTracer(tp trace.TracerProvider) trace.Tracer { + return tp.Tracer(tracerName, trace.WithInstrumentationVersion(version())) +} + +func newConfig(opts ...OpenTelemetryOpt) *config { + c := &config{ + Propagator: otel.GetTextMapPropagator(), + } + + for _, opt := range opts { + opt.apply(c) + } + + // Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context. + if c.TracerProvider != nil { + c.Tracer = newTracer(c.TracerProvider) + } + + return c +} + +// Version is the current release version of the go-runtime instrumentation. 
+func version() string { + return instrumentationVersion +} diff --git a/vendor/github.com/go-openapi/runtime/client/request.go b/vendor/github.com/go-openapi/runtime/client/request.go new file mode 100644 index 00000000000..6d9b25912ec --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/request.go @@ -0,0 +1,469 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "bytes" + "context" + "fmt" + "io" + "log" + "mime/multipart" + "net/http" + "net/textproto" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +var _ runtime.ClientRequest = new(request) // ensure compliance with the interface + +// Request represents a swagger client request. +// +// This Request struct converts to an HTTP request. +// There might be others that convert to other transports. +// There is no error checking here, it is assumed to be used after a spec has been validated, +// so impossible combinations should not arise (hopefully). +// +// The main purpose of this struct is to hide the machinery of adding params to a transport request. +// The generated code only implements what is necessary to turn a param into a valid value for these methods. +type request struct { + pathPattern string + method string + writer runtime.ClientRequestWriter + + pathParams map[string]string + header http.Header + query url.Values + formFields url.Values + fileFields map[string][]runtime.NamedReadCloser + payload any + timeout time.Duration + buf *bytes.Buffer + + getBody func(r *request) []byte +} + +// newRequest creates a new swagger http client request +func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) *request { + return &request{ + pathPattern: pathPattern, + method: method, + writer: writer, + header: make(http.Header), + query: make(url.Values), + timeout: DefaultTimeout, + getBody: getRequestBuffer, + } +} + +// BuildHTTP creates a new http request based on the data from the params +func (r *request) BuildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry) (*http.Request, error) { + return r.buildHTTP(mediaType, basePath, producers, registry, nil) +} + +func (r *request) GetMethod() string { + return r.method +} + +func (r *request) GetPath() string { + path := r.pathPattern + for k, v := range r.pathParams { + path = strings.ReplaceAll(path, "{"+k+"}", v) + } + return path +} + +func (r *request) GetBody() []byte { + return r.getBody(r) +} + +// SetHeaderParam adds a header param to the request +// when there is only 1 value provided for the varargs, it will set it. +// when there are several values provided for the varargs it will add it (no overriding) +func (r *request) SetHeaderParam(name string, values ...string) error { + if r.header == nil { + r.header = make(http.Header) + } + r.header[http.CanonicalHeaderKey(name)] = values + return nil +} + +// GetHeaderParams returns all the headers currently set for the request +func (r *request) GetHeaderParams() http.Header { + return r.header +} + +// SetQueryParam adds a query param to the request +// when there is only 1 value provided for the varargs, it will set it.
+// when there are several values provided for the varargs it will add it (no overriding) +func (r *request) SetQueryParam(name string, values ...string) error { + if r.query == nil { + r.query = make(url.Values) + } + r.query[name] = values + return nil +} + +// GetQueryParams returns a copy of all query params currently set for the request +func (r *request) GetQueryParams() url.Values { + var result = make(url.Values) + for key, value := range r.query { + result[key] = append([]string{}, value...) + } + return result +} + +// SetFormParam adds a form param to the request +// when there is only 1 value provided for the varargs, it will set it. +// when there are several values provided for the varargs it will add it (no overriding) +func (r *request) SetFormParam(name string, values ...string) error { + if r.formFields == nil { + r.formFields = make(url.Values) + } + r.formFields[name] = values + return nil +} + +// SetPathParam adds a path param to the request +func (r *request) SetPathParam(name string, value string) error { + if r.pathParams == nil { + r.pathParams = make(map[string]string) + } + + r.pathParams[name] = value + return nil +} + +// SetFileParam adds a file param to the request +func (r *request) SetFileParam(name string, files ...runtime.NamedReadCloser) error { + for _, file := range files { + if actualFile, ok := file.(*os.File); ok { + fi, err := os.Stat(actualFile.Name()) + if err != nil { + return err + } + if fi.IsDir() { + return fmt.Errorf("%q is a directory, only files are supported", file.Name()) + } + } + } + + if r.fileFields == nil { + r.fileFields = make(map[string][]runtime.NamedReadCloser) + } + if r.formFields == nil { + r.formFields = make(url.Values) + } + + r.fileFields[name] = files + return nil +} + +func (r *request) GetFileParam() map[string][]runtime.NamedReadCloser { + return r.fileFields +} + +// SetBodyParam sets a body parameter on the request. +// This does not yet serialize the object; this happens as late as possible. +func (r *request) SetBodyParam(payload any) error { + r.payload = payload + return nil +} + +func (r *request) GetBodyParam() any { + return r.payload +} + +// SetTimeout sets the timeout for a request +func (r *request) SetTimeout(timeout time.Duration) error { + r.timeout = timeout + return nil +} + +func (r *request) isMultipart(mediaType string) bool { + if len(r.fileFields) > 0 { + return true + } + + return runtime.MultipartFormMime == mediaType +} + +func (r *request) buildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry, auth runtime.ClientAuthInfoWriter) (*http.Request, error) { //nolint:gocyclo,maintidx + // build the data + if err := r.writer.WriteToRequest(r, registry); err != nil { + return nil, err + } + + // Our body must be an io.Reader. + // When we create the http.Request, if we pass it a + // bytes.Buffer then it will wrap it in an io.ReadCloser + // and set the content length automatically.
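+ // (Descriptive note: net/http sets ContentLength automatically only for
+ // *bytes.Buffer, *bytes.Reader and *strings.Reader bodies.)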
+ var body io.Reader + var pr *io.PipeReader + var pw *io.PipeWriter + + r.buf = bytes.NewBuffer(nil) + if r.payload != nil || len(r.formFields) > 0 || len(r.fileFields) > 0 { + body = r.buf + if r.isMultipart(mediaType) { + pr, pw = io.Pipe() + body = pr + } + } + + // check if this is a form type request + if len(r.formFields) > 0 || len(r.fileFields) > 0 { + if !r.isMultipart(mediaType) { + r.header.Set(runtime.HeaderContentType, mediaType) + formString := r.formFields.Encode() + r.buf.WriteString(formString) + goto DoneChoosingBodySource + } + + mp := multipart.NewWriter(pw) + r.header.Set(runtime.HeaderContentType, mangleContentType(mediaType, mp.Boundary())) + + go func() { + defer func() { + mp.Close() + pw.Close() + }() + + for fn, v := range r.formFields { + for _, vi := range v { + if err := mp.WriteField(fn, vi); err != nil { + logClose(err, pw) + return + } + } + } + + defer func() { + for _, ff := range r.fileFields { + for _, ffi := range ff { + ffi.Close() + } + } + }() + for fn, f := range r.fileFields { + for _, fi := range f { + var fileContentType string + if p, ok := fi.(interface { + ContentType() string + }); ok { + fileContentType = p.ContentType() + } else { + // Need to read the data so that we can detect the content type + const contentTypeBufferSize = 512 + buf := make([]byte, contentTypeBufferSize) + size, err := fi.Read(buf) + if err != nil && err != io.EOF { + logClose(err, pw) + return + } + fileContentType = http.DetectContentType(buf) + fi = runtime.NamedReader(fi.Name(), io.MultiReader(bytes.NewReader(buf[:size]), fi)) + } + + // Create the MIME headers for the new part + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"; filename="%s"`, + escapeQuotes(fn), escapeQuotes(filepath.Base(fi.Name())))) + h.Set("Content-Type", fileContentType) + + wrtr, err := mp.CreatePart(h) + if err != nil { + logClose(err, pw) + return + } + if _, err := io.Copy(wrtr, fi); err != nil { + logClose(err, pw) + } + } + } + }() + + goto DoneChoosingBodySource + } + + // if there is payload, use the producer to write the payload, and then + // set the header to the content-type appropriate for the payload produced + if r.payload != nil { + // TODO: infer most appropriate content type based on the producer used, + // and the `consumers` section of the spec/operation + r.header.Set(runtime.HeaderContentType, mediaType) + if rdr, ok := r.payload.(io.ReadCloser); ok { + body = rdr + goto DoneChoosingBodySource + } + + if rdr, ok := r.payload.(io.Reader); ok { + body = rdr + goto DoneChoosingBodySource + } + + producer := producers[mediaType] + if err := producer.Produce(r.buf, r.payload); err != nil { + return nil, err + } + } + +DoneChoosingBodySource: + + if runtime.CanHaveBody(r.method) && body != nil && r.header.Get(runtime.HeaderContentType) == "" { + r.header.Set(runtime.HeaderContentType, mediaType) + } + + if auth != nil { + // If we're not using r.buf as our http.Request's body, + // either the payload is an io.Reader or io.ReadCloser, + // or we're doing a multipart form/file. + // + // In those cases, if the AuthenticateRequest call asks for the body, + // we must read it into a buffer and provide that, then use that buffer + // as the body of our http.Request. + // + // This is done in-line with the GetBody() request rather than ahead + // of time, because there's no way to know if the AuthenticateRequest + // will even ask for the body of the request. 
+ // + // If for some reason the copy fails, there's no way to return that + // error to the GetBody() call, so return it afterwards. + // + // An error from the copy action is prioritized over any error + // from the AuthenticateRequest call, because the mis-read + // body may have interfered with the auth. + // + var copyErr error + if buf, ok := body.(*bytes.Buffer); body != nil && (!ok || buf != r.buf) { + var copied bool + r.getBody = func(r *request) []byte { + if copied { + return getRequestBuffer(r) + } + + defer func() { + copied = true + }() + + if _, copyErr = io.Copy(r.buf, body); copyErr != nil { + return nil + } + + if closer, ok := body.(io.ReadCloser); ok { + if copyErr = closer.Close(); copyErr != nil { + return nil + } + } + + body = r.buf + return getRequestBuffer(r) + } + } + + authErr := auth.AuthenticateRequest(r, registry) + + if copyErr != nil { + return nil, fmt.Errorf("error retrieving the response body: %v", copyErr) + } + + if authErr != nil { + return nil, authErr + } + } + + // In case the basePath or the request pathPattern include static query parameters, + // parse those out before constructing the final path. The parameters themselves + // will be merged with the ones set by the client, with the priority given first to + // the ones set by the client, then the path pattern, and lastly the base path. + basePathURL, err := url.Parse(basePath) + if err != nil { + return nil, err + } + staticQueryParams := basePathURL.Query() + + pathPatternURL, err := url.Parse(r.pathPattern) + if err != nil { + return nil, err + } + for name, values := range pathPatternURL.Query() { + if _, present := staticQueryParams[name]; present { + staticQueryParams.Del(name) + } + for _, value := range values { + staticQueryParams.Add(name, value) + } + } + + // create http request + var reinstateSlash bool + if pathPatternURL.Path != "" && pathPatternURL.Path != "/" && pathPatternURL.Path[len(pathPatternURL.Path)-1] == '/' { + reinstateSlash = true + } + + urlPath := path.Join(basePathURL.Path, pathPatternURL.Path) + for k, v := range r.pathParams { + urlPath = strings.ReplaceAll(urlPath, "{"+k+"}", url.PathEscape(v)) + } + if reinstateSlash { + urlPath += "/" + } + + req, err := http.NewRequestWithContext(context.Background(), r.method, urlPath, body) + if err != nil { + return nil, err + } + + originalParams := r.GetQueryParams() + + // Merge the query parameters extracted from the basePath with the ones set by + // the client in this struct. In case of conflict, the client wins. 
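+ // Hypothetical illustration: with a basePath of "/api?tenant=default" and a
+ // client that already set tenant=acme, the merged query keeps tenant=acme.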
+ for k, v := range staticQueryParams { + _, present := originalParams[k] + if !present { + if err = r.SetQueryParam(k, v...); err != nil { + return nil, err + } + } + } + + req.URL.RawQuery = r.query.Encode() + req.Header = r.header + + return req, nil +} + +func escapeQuotes(s string) string { + return strings.NewReplacer("\\", "\\\\", `"`, "\\\"").Replace(s) +} + +func getRequestBuffer(r *request) []byte { + if r.buf == nil { + return nil + } + return r.buf.Bytes() +} + +func logClose(err error, pw *io.PipeWriter) { + log.Println(err) + closeErr := pw.CloseWithError(err) + if closeErr != nil { + log.Println(closeErr) + } +} + +func mangleContentType(mediaType, boundary string) string { + if strings.ToLower(mediaType) == runtime.URLencodedFormMime { + return fmt.Sprintf("%s; boundary=%s", mediaType, boundary) + } + return "multipart/form-data; boundary=" + boundary +} diff --git a/vendor/github.com/go-openapi/runtime/client/response.go b/vendor/github.com/go-openapi/runtime/client/response.go new file mode 100644 index 00000000000..59abc3b549a --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/response.go @@ -0,0 +1,39 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "io" + "net/http" + + "github.com/go-openapi/runtime" +) + +var _ runtime.ClientResponse = response{} + +func newResponse(resp *http.Response) runtime.ClientResponse { return response{resp: resp} } + +type response struct { + resp *http.Response +} + +func (r response) Code() int { + return r.resp.StatusCode +} + +func (r response) Message() string { + return r.resp.Status +} + +func (r response) GetHeader(name string) string { + return r.resp.Header.Get(name) +} + +func (r response) GetHeaders(name string) []string { + return r.resp.Header.Values(name) +} + +func (r response) Body() io.ReadCloser { + return r.resp.Body +} diff --git a/vendor/github.com/go-openapi/runtime/client/runtime.go b/vendor/github.com/go-openapi/runtime/client/runtime.go new file mode 100644 index 00000000000..203c74e49db --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/runtime.go @@ -0,0 +1,564 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "mime" + "net/http" + "net/http/httputil" + "os" + "strings" + "sync" + "time" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/logger" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/runtime/yamlpc" + "github.com/go-openapi/strfmt" +) + +const ( + schemeHTTP = "http" + schemeHTTPS = "https" +) + +// DefaultTimeout the default request timeout +var DefaultTimeout = 30 * time.Second + +// TLSClientOptions to configure client authentication with mutual TLS +type TLSClientOptions struct { + // Certificate is the path to a PEM-encoded certificate to be used for + // client authentication. If set then Key must also be set. + Certificate string + + // LoadedCertificate is the certificate to be used for client authentication. + // This field is ignored if Certificate is set. If this field is set, LoadedKey + // is also required. + LoadedCertificate *x509.Certificate + + // Key is the path to an unencrypted PEM-encoded private key for client + // authentication. This field is required if Certificate is set. 
Key string + + // LoadedKey is the key for client authentication. This field is required if + // LoadedCertificate is set. + LoadedKey crypto.PrivateKey + + // CA is a path to a PEM-encoded certificate that specifies the root certificate + // to use when validating the TLS certificate presented by the server. If this field + // (and LoadedCA) is not set, the system certificate pool is used. This field is ignored if LoadedCA + // is set. + CA string + + // LoadedCA specifies the root certificate to use when validating the server's TLS certificate. + // If this field (and CA) is not set, the system certificate pool is used. + LoadedCA *x509.Certificate + + // LoadedCAPool specifies a pool of RootCAs to use when validating the server's TLS certificate. + // If set, it will be combined with the other loaded certificates (see LoadedCA and CA). + // If neither LoadedCA nor CA is set, the provided pool will override the system + // certificate pool. + // The caller must not use the supplied pool after calling TLSClientAuth. + LoadedCAPool *x509.CertPool + + // ServerName specifies the hostname to use when verifying the server certificate. + // If this field is set then InsecureSkipVerify will be ignored and treated as + // false. + ServerName string + + // InsecureSkipVerify controls whether the certificate chain and hostname presented + // by the server are validated. If true, any certificate is accepted. + InsecureSkipVerify bool + + // VerifyPeerCertificate, if not nil, is called after normal + // certificate verification. It receives the raw ASN.1 certificates + // provided by the peer and also any verified chains that normal processing found. + // If it returns a non-nil error, the handshake is aborted and that error results. + // + // If normal verification fails then the handshake will abort before + // considering this callback. If normal verification is disabled by + // setting InsecureSkipVerify then this callback will be considered but + // the verifiedChains argument will always be nil. + VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error + + // SessionTicketsDisabled may be set to true to disable session ticket and + // PSK (resumption) support. Note that on clients, session ticket support is + // also disabled if ClientSessionCache is nil. + SessionTicketsDisabled bool + + // ClientSessionCache is a cache of ClientSessionState entries for TLS + // session resumption. It is only used by clients. + ClientSessionCache tls.ClientSessionCache + + // Prevents callers using unkeyed fields.
_ struct{} +} + +// TLSClientAuth creates a tls.Config for mutual auth +func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) { + // create client tls config + cfg := &tls.Config{ + MinVersion: tls.VersionTLS12, + } + + // load client cert if specified + if opts.Certificate != "" { + cert, err := tls.LoadX509KeyPair(opts.Certificate, opts.Key) + if err != nil { + return nil, fmt.Errorf("tls client cert: %v", err) + } + cfg.Certificates = []tls.Certificate{cert} + } else if opts.LoadedCertificate != nil { + block := pem.Block{Type: "CERTIFICATE", Bytes: opts.LoadedCertificate.Raw} + certPem := pem.EncodeToMemory(&block) + + var keyBytes []byte + switch k := opts.LoadedKey.(type) { + case *rsa.PrivateKey: + keyBytes = x509.MarshalPKCS1PrivateKey(k) + case *ecdsa.PrivateKey: + var err error + keyBytes, err = x509.MarshalECPrivateKey(k) + if err != nil { + return nil, fmt.Errorf("tls client priv key: %v", err) + } + default: + return nil, errors.New("tls client priv key: unsupported key type") + } + + block = pem.Block{Type: "PRIVATE KEY", Bytes: keyBytes} + keyPem := pem.EncodeToMemory(&block) + + cert, err := tls.X509KeyPair(certPem, keyPem) + if err != nil { + return nil, fmt.Errorf("tls client cert: %v", err) + } + cfg.Certificates = []tls.Certificate{cert} + } + + cfg.InsecureSkipVerify = opts.InsecureSkipVerify + + cfg.VerifyPeerCertificate = opts.VerifyPeerCertificate + cfg.SessionTicketsDisabled = opts.SessionTicketsDisabled + cfg.ClientSessionCache = opts.ClientSessionCache + + // When no CA certificate is provided, default to the system cert pool + // that way when a request is made to a server known by the system trust store, + // the name is still verified + switch { + case opts.LoadedCA != nil: + caCertPool := basePool(opts.LoadedCAPool) + caCertPool.AddCert(opts.LoadedCA) + cfg.RootCAs = caCertPool + case opts.CA != "": + // load ca cert + caCert, err := os.ReadFile(opts.CA) + if err != nil { + return nil, fmt.Errorf("tls client ca: %v", err) + } + caCertPool := basePool(opts.LoadedCAPool) + caCertPool.AppendCertsFromPEM(caCert) + cfg.RootCAs = caCertPool + case opts.LoadedCAPool != nil: + cfg.RootCAs = opts.LoadedCAPool + } + + // apply server name override + if opts.ServerName != "" { + cfg.InsecureSkipVerify = false + cfg.ServerName = opts.ServerName + } + + return cfg, nil +} + +// TLSTransport creates an http client transport suitable for mutual tls auth +func TLSTransport(opts TLSClientOptions) (http.RoundTripper, error) { + cfg, err := TLSClientAuth(opts) + if err != nil { + return nil, err + } + + return &http.Transport{TLSClientConfig: cfg}, nil +} + +// TLSClient creates an http.Client for mutual auth +func TLSClient(opts TLSClientOptions) (*http.Client, error) { + transport, err := TLSTransport(opts) + if err != nil { + return nil, err + } + return &http.Client{Transport: transport}, nil +} + +// Runtime represents an API client that uses the transport +// to make http requests based on a swagger specification.
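+//
+// A hypothetical minimal use (generated clients normally wire this up), where
+// op is a *runtime.ClientOperation built elsewhere:
+//
+//	rt := client.New("petstore.example.com", "/v2", []string{"https"})
+//	rt.DefaultAuthentication = client.BearerToken("s3cr3t")
+//	result, err := rt.Submit(op)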
+type Runtime struct { + DefaultMediaType string + DefaultAuthentication runtime.ClientAuthInfoWriter + Consumers map[string]runtime.Consumer + Producers map[string]runtime.Producer + + Transport http.RoundTripper + Jar http.CookieJar + // Spec *spec.Document + Host string + BasePath string + Formats strfmt.Registry + Context context.Context //nolint:containedctx // we precisely want this type to contain the request context + + Debug bool + logger logger.Logger + + clientOnce *sync.Once + client *http.Client + schemes []string + response ClientResponseFunc +} + +// New creates a new default runtime for a swagger api runtime.Client +func New(host, basePath string, schemes []string) *Runtime { + var rt Runtime + rt.DefaultMediaType = runtime.JSONMime + + // TODO: actually infer this stuff from the spec + rt.Consumers = map[string]runtime.Consumer{ + runtime.YAMLMime: yamlpc.YAMLConsumer(), + runtime.JSONMime: runtime.JSONConsumer(), + runtime.XMLMime: runtime.XMLConsumer(), + runtime.TextMime: runtime.TextConsumer(), + runtime.HTMLMime: runtime.TextConsumer(), + runtime.CSVMime: runtime.CSVConsumer(), + runtime.DefaultMime: runtime.ByteStreamConsumer(), + } + rt.Producers = map[string]runtime.Producer{ + runtime.YAMLMime: yamlpc.YAMLProducer(), + runtime.JSONMime: runtime.JSONProducer(), + runtime.XMLMime: runtime.XMLProducer(), + runtime.TextMime: runtime.TextProducer(), + runtime.HTMLMime: runtime.TextProducer(), + runtime.CSVMime: runtime.CSVProducer(), + runtime.DefaultMime: runtime.ByteStreamProducer(), + } + rt.Transport = http.DefaultTransport + rt.Jar = nil + rt.Host = host + rt.BasePath = basePath + rt.Context = context.Background() + rt.clientOnce = new(sync.Once) + if !strings.HasPrefix(rt.BasePath, "/") { + rt.BasePath = "/" + rt.BasePath + } + + rt.Debug = logger.DebugEnabled() + rt.logger = logger.StandardLogger{} + rt.response = newResponse + + if len(schemes) > 0 { + rt.schemes = schemes + } + return &rt +} + +// NewWithClient allows you to create a new transport with a configured http.Client +func NewWithClient(host, basePath string, schemes []string, client *http.Client) *Runtime { + rt := New(host, basePath, schemes) + if client != nil { + rt.clientOnce.Do(func() { + rt.client = client + }) + } + return rt +} + +// WithOpenTracing adds opentracing support to the provided runtime. +// A new client span is created for each request. +// If the context of the client operation does not contain an active span, no span is created. +// The provided opts are applied to each span - for example to add global tags. +// +// Deprecated: use [WithOpenTelemetry] instead, as opentracing is now archived and superseded by opentelemetry. +// +// # Deprecation notice +// +// The [Runtime.WithOpenTracing] method has been deprecated in favor of [Runtime.WithOpenTelemetry]. +// +// The method is still around so programs calling it will still build. However, it will return +// an opentelemetry transport. +// +// If you have a strict requirement on using opentracing, you may still do so by importing +// module [github.com/go-openapi/runtime/client-middleware/opentracing] and using +// [github.com/go-openapi/runtime/client-middleware/opentracing.WithOpenTracing] with your +// usual opentracing options and opentracing-enabled transport. +// +// Passed options are ignored unless they are of type [OpenTelemetryOpt].
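+//
+// Hypothetical migration sketch:
+//
+//	// before: transport := rt.WithOpenTracing()
+//	transport := rt.WithOpenTelemetry() // equivalent, since both now return an opentelemetry transport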
+func (r *Runtime) WithOpenTracing(opts ...any) runtime.ClientTransport { + otelOpts := make([]OpenTelemetryOpt, 0, len(opts)) + for _, o := range opts { + otelOpt, ok := o.(OpenTelemetryOpt) + if !ok { + continue + } + otelOpts = append(otelOpts, otelOpt) + } + + return r.WithOpenTelemetry(otelOpts...) +} + +// WithOpenTelemetry adds opentelemetry support to the provided runtime. +// A new client span is created for each request. +// If the context of the client operation does not contain an active span, no span is created. +// The provided opts are applied to each span - for example to add global tags. +func (r *Runtime) WithOpenTelemetry(opts ...OpenTelemetryOpt) runtime.ClientTransport { + return newOpenTelemetryTransport(r, r.Host, opts) +} + +// EnableConnectionReuse drains the remaining body from a response +// so that Go will reuse the TCP connections. +// +// This is not enabled by default because there are servers where +// the response never gets closed and that would make the code hang forever. +// So instead it's provided as an http client middleware that can be used to override +// any request. +func (r *Runtime) EnableConnectionReuse() { + if r.client == nil { + r.Transport = KeepAliveTransport( + transportOrDefault(r.Transport, http.DefaultTransport), + ) + return + } + + r.client.Transport = KeepAliveTransport( + transportOrDefault(r.client.Transport, + transportOrDefault(r.Transport, http.DefaultTransport), + ), + ) +} + +func (r *Runtime) CreateHttpRequest(operation *runtime.ClientOperation) (req *http.Request, err error) { //nolint:revive + _, req, err = r.createHttpRequest(operation) + return +} + +// Submit a request; when there is a body on success, it will turn that into the result. +// All other things are turned into an api error for swagger which retains the status code. +func (r *Runtime) Submit(operation *runtime.ClientOperation) (any, error) { + _, readResponse, _ := operation.Params, operation.Reader, operation.AuthInfo + + request, req, err := r.createHttpRequest(operation) + if err != nil { + return nil, err + } + + r.clientOnce.Do(func() { + r.client = &http.Client{ + Transport: r.Transport, + Jar: r.Jar, + } + }) + + if r.Debug { + b, err2 := httputil.DumpRequestOut(req, true) + if err2 != nil { + return nil, err2 + } + r.logger.Debugf("%s\n", string(b)) + } + + var parentCtx context.Context + switch { + case operation.Context != nil: + parentCtx = operation.Context + case r.Context != nil: + parentCtx = r.Context + default: + parentCtx = context.Background() + } + + var ( + ctx context.Context + cancel context.CancelFunc + ) + if request.timeout == 0 { + // There may be a deadline in the context passed to the operation. + // Otherwise, there is no timeout set. + ctx, cancel = context.WithCancel(parentCtx) + } else { + // Sets the timeout passed from request params (by default runtime.DefaultTimeout). + // If there is already a deadline in the parent context, the shortest will + // apply.
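+ // Illustration: a parent deadline of 5s with a request timeout of 30s
+ // yields an effective 5s deadline, since context.WithTimeout never
+ // extends an existing deadline.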
+ ctx, cancel = context.WithTimeout(parentCtx, request.timeout) + } + defer cancel() + + var client *http.Client + if operation.Client != nil { + client = operation.Client + } else { + client = r.client + } + req = req.WithContext(ctx) + res, err := client.Do(req) // make requests, by default follows 10 redirects before failing + if err != nil { + return nil, err + } + defer res.Body.Close() + + ct := res.Header.Get(runtime.HeaderContentType) + if ct == "" { // this should really never occur + ct = r.DefaultMediaType + } + + if r.Debug { + printBody := true + if ct == runtime.DefaultMime { + printBody = false // Spare the terminal from a binary blob. + } + b, err2 := httputil.DumpResponse(res, printBody) + if err2 != nil { + return nil, err2 + } + r.logger.Debugf("%s\n", string(b)) + } + + mt, _, err := mime.ParseMediaType(ct) + if err != nil { + return nil, fmt.Errorf("parse content type: %s", err) + } + + cons, ok := r.Consumers[mt] + if !ok { + if cons, ok = r.Consumers["*/*"]; !ok { + // scream about not knowing what to do + return nil, fmt.Errorf("no consumer: %q", ct) + } + } + return readResponse.ReadResponse(r.response(res), cons) +} + +// SetDebug changes the debug flag. +// It ensures that client and middlewares have the set debug level. +func (r *Runtime) SetDebug(debug bool) { + r.Debug = debug + middleware.Debug = debug +} + +// SetLogger changes the logger stream. +// It ensures that client and middlewares use the same logger. +func (r *Runtime) SetLogger(logger logger.Logger) { + r.logger = logger + middleware.Logger = logger +} + +type ClientResponseFunc = func(*http.Response) runtime.ClientResponse //nolint:revive + +// SetResponseReader changes the response reader implementation. +func (r *Runtime) SetResponseReader(f ClientResponseFunc) { + if f == nil { + return + } + r.response = f +} + +func (r *Runtime) pickScheme(schemes []string) string { + if v := r.selectScheme(r.schemes); v != "" { + return v + } + if v := r.selectScheme(schemes); v != "" { + return v + } + return schemeHTTP +} + +func (r *Runtime) selectScheme(schemes []string) string { + schLen := len(schemes) + if schLen == 0 { + return "" + } + + scheme := schemes[0] + // prefer https, but skip when not possible + if scheme != schemeHTTPS && schLen > 1 { + for _, sch := range schemes { + if sch == schemeHTTPS { + scheme = sch + break + } + } + } + return scheme +} + +func transportOrDefault(left, right http.RoundTripper) http.RoundTripper { + if left == nil { + return right + } + return left +} + +// takes a client operation and creates equivalent http.Request +func (r *Runtime) createHttpRequest(operation *runtime.ClientOperation) (*request, *http.Request, error) { //nolint:revive + params, _, auth := operation.Params, operation.Reader, operation.AuthInfo + + request := newRequest(operation.Method, operation.PathPattern, params) + + var accept []string + accept = append(accept, operation.ProducesMediaTypes...) 
+ if err := request.SetHeaderParam(runtime.HeaderAccept, accept...); err != nil { + return nil, nil, err + } + + if auth == nil && r.DefaultAuthentication != nil { + auth = runtime.ClientAuthInfoWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error { + if req.GetHeaderParams().Get(runtime.HeaderAuthorization) != "" { + return nil + } + return r.DefaultAuthentication.AuthenticateRequest(req, reg) + }) + } + // if auth != nil { + // if err := auth.AuthenticateRequest(request, r.Formats); err != nil { + // return nil, err + // } + //} + + // TODO: pick appropriate media type + cmt := r.DefaultMediaType + for _, mediaType := range operation.ConsumesMediaTypes { + // Pick first non-empty media type + if mediaType != "" { + cmt = mediaType + break + } + } + + if _, ok := r.Producers[cmt]; !ok && cmt != runtime.MultipartFormMime && cmt != runtime.URLencodedFormMime { + return nil, nil, fmt.Errorf("none of producers: %v registered. try %s", r.Producers, cmt) + } + + req, err := request.buildHTTP(cmt, r.BasePath, r.Producers, r.Formats, auth) + if err != nil { + return nil, nil, err + } + req.URL.Scheme = r.pickScheme(operation.Schemes) + req.URL.Host = r.Host + req.Host = r.Host + return request, req, nil +} + +func basePool(pool *x509.CertPool) *x509.CertPool { + if pool == nil { + return x509.NewCertPool() + } + return pool +} diff --git a/vendor/github.com/go-openapi/runtime/client_auth_info.go b/vendor/github.com/go-openapi/runtime/client_auth_info.go new file mode 100644 index 00000000000..581e64451a2 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_auth_info.go @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import "github.com/go-openapi/strfmt" + +// A ClientAuthInfoWriterFunc converts a function to a request writer interface +type ClientAuthInfoWriterFunc func(ClientRequest, strfmt.Registry) error + +// AuthenticateRequest adds authentication data to the request +func (fn ClientAuthInfoWriterFunc) AuthenticateRequest(req ClientRequest, reg strfmt.Registry) error { + return fn(req, reg) +} + +// A ClientAuthInfoWriter implementor knows how to write authentication info to a request +type ClientAuthInfoWriter interface { + AuthenticateRequest(ClientRequest, strfmt.Registry) error +} diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go new file mode 100644 index 00000000000..b0bb0977db5 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_operation.go @@ -0,0 +1,30 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "context" + "net/http" +) + +// ClientOperation represents the context for a swagger operation to be submitted to the transport +type ClientOperation struct { + ID string + Method string + PathPattern string + ProducesMediaTypes []string + ConsumesMediaTypes []string + Schemes []string + AuthInfo ClientAuthInfoWriter + Params ClientRequestWriter + Reader ClientResponseReader + Context context.Context //nolint:containedctx // we precisely want this type to contain the request context + Client *http.Client +} + +// A ClientTransport implementor knows how to submit Request objects to some destination +type ClientTransport interface { + // Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) + Submit(*ClientOperation) (any, error) +} diff --git 
a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go new file mode 100644 index 00000000000..6e335b36f32 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_request.go @@ -0,0 +1,141 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "io" + "net/http" + "net/url" + "time" + + "github.com/go-openapi/strfmt" +) + +// ClientRequestWriterFunc converts a function to a request writer interface +type ClientRequestWriterFunc func(ClientRequest, strfmt.Registry) error + +// WriteToRequest adds data to the request +func (fn ClientRequestWriterFunc) WriteToRequest(req ClientRequest, reg strfmt.Registry) error { + return fn(req, reg) +} + +// ClientRequestWriter is an interface for things that know how to write to a request +type ClientRequestWriter interface { + WriteToRequest(ClientRequest, strfmt.Registry) error +} + +// ClientRequest is an interface for things that know how to +// add information to a swagger client request. +type ClientRequest interface { //nolint:interfacebloat // a swagger-capable request is quite rich, hence the many getter/setters + SetHeaderParam(string, ...string) error + + GetHeaderParams() http.Header + + SetQueryParam(string, ...string) error + + SetFormParam(string, ...string) error + + SetPathParam(string, string) error + + GetQueryParams() url.Values + + SetFileParam(string, ...NamedReadCloser) error + + SetBodyParam(any) error + + SetTimeout(time.Duration) error + + GetMethod() string + + GetPath() string + + GetBody() []byte + + GetBodyParam() any + + GetFileParam() map[string][]NamedReadCloser +} + +// NamedReadCloser represents a named ReadCloser interface +type NamedReadCloser interface { + io.ReadCloser + Name() string +} + +// NamedReader creates a NamedReadCloser for use as file upload +func NamedReader(name string, rdr io.Reader) NamedReadCloser { + rc, ok := rdr.(io.ReadCloser) + if !ok { + rc = io.NopCloser(rdr) + } + return &namedReadCloser{ + name: name, + cr: rc, + } +} + +type namedReadCloser struct { + name string + cr io.ReadCloser +} + +func (n *namedReadCloser) Close() error { + return n.cr.Close() +} +func (n *namedReadCloser) Read(p []byte) (int, error) { + return n.cr.Read(p) +} +func (n *namedReadCloser) Name() string { + return n.name +} + +type TestClientRequest struct { + Headers http.Header + Body any +} + +func (t *TestClientRequest) SetHeaderParam(name string, values ...string) error { + if t.Headers == nil { + t.Headers = make(http.Header) + } + t.Headers.Set(name, values[0]) + return nil +} + +func (t *TestClientRequest) SetQueryParam(_ string, _ ...string) error { return nil } + +func (t *TestClientRequest) SetFormParam(_ string, _ ...string) error { return nil } + +func (t *TestClientRequest) SetPathParam(_ string, _ string) error { return nil } + +func (t *TestClientRequest) SetFileParam(_ string, _ ...NamedReadCloser) error { return nil } + +func (t *TestClientRequest) SetBodyParam(body any) error { + t.Body = body + return nil +} + +func (t *TestClientRequest) SetTimeout(time.Duration) error { + return nil +} + +func (t *TestClientRequest) GetQueryParams() url.Values { return nil } + +func (t *TestClientRequest) GetMethod() string { return "" } + +func (t *TestClientRequest) GetPath() string { return "" } + +func (t *TestClientRequest) GetBody() []byte { return nil } + +func (t *TestClientRequest) GetBodyParam() any { + return t.Body +} + +func (t *TestClientRequest) 
GetFileParam() map[string][]NamedReadCloser { + return nil +} + +func (t *TestClientRequest) GetHeaderParams() http.Header { + return t.Headers +} diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go new file mode 100644 index 00000000000..f2cf942ab36 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_response.go @@ -0,0 +1,109 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "encoding/json" + "fmt" + "io" + "strings" +) + +// A ClientResponse represents a client response. +// +// This bridges between responses obtained from different transports +type ClientResponse interface { + Code() int + Message() string + GetHeader(string) string + GetHeaders(string) []string + Body() io.ReadCloser +} + +// A ClientResponseReaderFunc turns a function into a ClientResponseReader interface implementation +type ClientResponseReaderFunc func(ClientResponse, Consumer) (any, error) + +// ReadResponse reads the response +func (read ClientResponseReaderFunc) ReadResponse(resp ClientResponse, consumer Consumer) (any, error) { + return read(resp, consumer) +} + +// A ClientResponseReader is an interface for things that want to read a response. +// An application of this is to create structs from response values +type ClientResponseReader interface { + ReadResponse(ClientResponse, Consumer) (any, error) +} + +// APIError wraps an error model and captures the status code +type APIError struct { + OperationName string + Response any + Code int +} + +// NewAPIError creates a new API error +func NewAPIError(opName string, payload any, code int) *APIError { + return &APIError{ + OperationName: opName, + Response: payload, + Code: code, + } +} + +// sanitizer ensures that single quotes are escaped +var sanitizer = strings.NewReplacer(`\`, `\\`, `'`, `\'`) + +func (o *APIError) Error() string { + var resp []byte + if err, ok := o.Response.(error); ok { + resp = []byte("'" + sanitizer.Replace(err.Error()) + "'") + } else { + resp, _ = json.Marshal(o.Response) + } + + return fmt.Sprintf("%s (status %d): %s", o.OperationName, o.Code, resp) +} + +func (o *APIError) String() string { + return o.Error() +} + +// IsSuccess returns true when this API response returns a 2xx status code +func (o *APIError) IsSuccess() bool { + const statusOK = 2 + return o.Code/100 == statusOK +} + +// IsRedirect returns true when this API response returns a 3xx status code +func (o *APIError) IsRedirect() bool { + const statusRedirect = 3 + return o.Code/100 == statusRedirect +} + +// IsClientError returns true when this API response returns a 4xx status code +func (o *APIError) IsClientError() bool { + const statusClientError = 4 + return o.Code/100 == statusClientError +} + +// IsServerError returns true when this API response returns a 5xx status code +func (o *APIError) IsServerError() bool { + const statusServerError = 5 + return o.Code/100 == statusServerError +} + +// IsCode returns true when this API response returns a given status code +func (o *APIError) IsCode(code int) bool { + return o.Code == code +} + +// A ClientResponseStatus is a common interface implemented by all responses in the generated code. +// You can use this to treat any client response based on status code +type ClientResponseStatus interface { + IsSuccess() bool + IsRedirect() bool + IsClientError() bool + IsServerError() bool + IsCode(int) bool +} diff --git
a/vendor/github.com/go-openapi/runtime/constants.go b/vendor/github.com/go-openapi/runtime/constants.go new file mode 100644 index 00000000000..62ae9eec0cf --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/constants.go @@ -0,0 +1,38 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +const ( + // HeaderContentType represents a http content-type header, its value is supposed to be a mime type + HeaderContentType = "Content-Type" + + // HeaderTransferEncoding represents a http transfer-encoding header. + HeaderTransferEncoding = "Transfer-Encoding" + + // HeaderAccept the Accept header + HeaderAccept = "Accept" + // HeaderAuthorization the Authorization header + HeaderAuthorization = "Authorization" + + charsetKey = "charset" + + // DefaultMime the default fallback mime type + DefaultMime = "application/octet-stream" + // JSONMime the json mime type + JSONMime = "application/json" + // YAMLMime the yaml mime type + YAMLMime = "application/x-yaml" + // XMLMime the xml mime type + XMLMime = "application/xml" + // TextMime the text mime type + TextMime = "text/plain" + // HTMLMime the html mime type + HTMLMime = "text/html" + // CSVMime the csv mime type + CSVMime = "text/csv" + // MultipartFormMime the multipart form mime type + MultipartFormMime = "multipart/form-data" + // URLencodedFormMime the url encoded form mime type + URLencodedFormMime = "application/x-www-form-urlencoded" +) diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go new file mode 100644 index 00000000000..567e3d9db24 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/csv.go @@ -0,0 +1,339 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "bytes" + "context" + "encoding" + "encoding/csv" + "errors" + "fmt" + "io" + "reflect" + + "golang.org/x/sync/errgroup" +) + +// CSVConsumer creates a new CSV consumer. +// +// The consumer consumes CSV records from a provided reader into the data passed by reference. +// +// CSVOpts options may be specified to alter the default CSV behavior on the reader and the writer side (e.g. separator, skip header, ...). +// The defaults are those of the standard library's csv.Reader and csv.Writer. +// +// Supported output underlying types and interfaces, prioritized in this order: +// - *csv.Writer +// - CSVWriter (writer options are ignored) +// - io.Writer (as raw bytes) +// - io.ReaderFrom (as raw bytes) +// - encoding.BinaryUnmarshaler (as raw bytes) +// - *[][]string (as a collection of records) +// - *[]byte (as raw bytes) +// - *string (as raw bytes) +// +// The consumer prioritizes situations where buffering the input is not required.
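// [Editor's example] A sketch of the record-oriented path listed above, reading
// into a *[][]string destination and skipping one header line; the input literal
// is illustrative:
//
//	var records [][]string
//	consumer := CSVConsumer(WithCSVSkipLines(1))
//	err := consumer.Consume(strings.NewReader("h1,h2\na,b\n"), &records)
//	// on success, records == [][]string{{"a", "b"}}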
+func CSVConsumer(opts ...CSVOpt) Consumer { + o := csvOptsWithDefaults(opts) + + return ConsumerFunc(func(reader io.Reader, data any) error { + if reader == nil { + return errors.New("CSVConsumer requires a reader") + } + if data == nil { + return errors.New("nil destination for CSVConsumer") + } + + csvReader := csv.NewReader(reader) + o.applyToReader(csvReader) + closer := defaultCloser + if o.closeStream { + if cl, isReaderCloser := reader.(io.Closer); isReaderCloser { + closer = cl.Close + } + } + defer func() { + _ = closer() + }() + + switch destination := data.(type) { + case *csv.Writer: + csvWriter := destination + o.applyToWriter(csvWriter) + + return pipeCSV(csvWriter, csvReader, o) + + case CSVWriter: + csvWriter := destination + // no writer options available + + return pipeCSV(csvWriter, csvReader, o) + + case io.Writer: + csvWriter := csv.NewWriter(destination) + o.applyToWriter(csvWriter) + + return pipeCSV(csvWriter, csvReader, o) + + case io.ReaderFrom: + var buf bytes.Buffer + csvWriter := csv.NewWriter(&buf) + o.applyToWriter(csvWriter) + if err := bufferedCSV(csvWriter, csvReader, o); err != nil { + return err + } + _, err := destination.ReadFrom(&buf) + + return err + + case encoding.BinaryUnmarshaler: + var buf bytes.Buffer + csvWriter := csv.NewWriter(&buf) + o.applyToWriter(csvWriter) + if err := bufferedCSV(csvWriter, csvReader, o); err != nil { + return err + } + + return destination.UnmarshalBinary(buf.Bytes()) + + default: + // support *[][]string, *[]byte, *string + if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr { + return errors.New("destination must be a pointer") + } + + v := reflect.Indirect(reflect.ValueOf(data)) + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String: + csvWriter := &csvRecordsWriter{} + // writer options are ignored + if err := pipeCSV(csvWriter, csvReader, o); err != nil { + return err + } + + v.Grow(len(csvWriter.records)) + v.SetCap(len(csvWriter.records)) // in case Grow was unnecessary, trim down the capacity + v.SetLen(len(csvWriter.records)) + reflect.Copy(v, reflect.ValueOf(csvWriter.records)) + + return nil + + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + var buf bytes.Buffer + csvWriter := csv.NewWriter(&buf) + o.applyToWriter(csvWriter) + if err := bufferedCSV(csvWriter, csvReader, o); err != nil { + return err + } + v.SetBytes(buf.Bytes()) + + return nil + + case t.Kind() == reflect.String: + var buf bytes.Buffer + csvWriter := csv.NewWriter(&buf) + o.applyToWriter(csvWriter) + if err := bufferedCSV(csvWriter, csvReader, o); err != nil { + return err + } + v.SetString(buf.String()) + + return nil + + default: + return fmt.Errorf("%v (%T) is not supported by the CSVConsumer, %s", + data, data, "can be resolved by supporting CSVWriter/Writer/BinaryUnmarshaler interface", + ) + } + } + }) +} + +// CSVProducer creates a new CSV producer. +// +// The producer takes input data then writes as CSV to an output writer (essentially as a pipe). +// +// Supported input underlying types and interfaces, prioritized in this order: +// - *csv.Reader +// - CSVReader (reader options are ignored) +// - io.Reader +// - io.WriterTo +// - encoding.BinaryMarshaler +// - [][]string +// - []byte +// - string +// +// The producer prioritizes situations where buffering the input is not required.
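// [Editor's example] The mirror-image sketch for the producer described above,
// writing a [][]string out as CSV; the values are illustrative:
//
//	var buf bytes.Buffer
//	producer := CSVProducer()
//	err := producer.Produce(&buf, [][]string{{"a", "b"}, {"c", "d"}})
//	// on success, buf.String() == "a,b\nc,d\n"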
+func CSVProducer(opts ...CSVOpt) Producer { + o := csvOptsWithDefaults(opts) + + return ProducerFunc(func(writer io.Writer, data any) error { + if writer == nil { + return errors.New("CSVProducer requires a writer") + } + if data == nil { + return errors.New("nil data for CSVProducer") + } + + csvWriter := csv.NewWriter(writer) + o.applyToWriter(csvWriter) + closer := defaultCloser + if o.closeStream { + if cl, isWriterCloser := writer.(io.Closer); isWriterCloser { + closer = cl.Close + } + } + defer func() { + _ = closer() + }() + + if rc, isDataCloser := data.(io.ReadCloser); isDataCloser { + defer rc.Close() + } + + switch origin := data.(type) { + case *csv.Reader: + csvReader := origin + o.applyToReader(csvReader) + + return pipeCSV(csvWriter, csvReader, o) + + case CSVReader: + csvReader := origin + // no reader options available + + return pipeCSV(csvWriter, csvReader, o) + + case io.Reader: + csvReader := csv.NewReader(origin) + o.applyToReader(csvReader) + + return pipeCSV(csvWriter, csvReader, o) + + case io.WriterTo: + // async piping of the writes performed by WriteTo + r, w := io.Pipe() + csvReader := csv.NewReader(r) + o.applyToReader(csvReader) + + pipe, _ := errgroup.WithContext(context.Background()) + pipe.Go(func() error { + _, err := origin.WriteTo(w) + _ = w.Close() + return err + }) + + pipe.Go(func() error { + defer func() { + _ = r.Close() + }() + + return pipeCSV(csvWriter, csvReader, o) + }) + + return pipe.Wait() + + case encoding.BinaryMarshaler: + buf, err := origin.MarshalBinary() + if err != nil { + return err + } + rdr := bytes.NewBuffer(buf) + csvReader := csv.NewReader(rdr) + + return bufferedCSV(csvWriter, csvReader, o) + + default: + // support [][]string, []byte, string (or pointers to those) + v := reflect.Indirect(reflect.ValueOf(data)) + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String: + csvReader := &csvRecordsWriter{ + records: make([][]string, v.Len()), + } + reflect.Copy(reflect.ValueOf(csvReader.records), v) + + return pipeCSV(csvWriter, csvReader, o) + + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + buf := bytes.NewBuffer(v.Bytes()) + csvReader := csv.NewReader(buf) + o.applyToReader(csvReader) + + return bufferedCSV(csvWriter, csvReader, o) + + case t.Kind() == reflect.String: + buf := bytes.NewBufferString(v.String()) + csvReader := csv.NewReader(buf) + o.applyToReader(csvReader) + + return bufferedCSV(csvWriter, csvReader, o) + + default: + return fmt.Errorf("%v (%T) is not supported by the CSVProducer, %s", + data, data, "can be resolved by supporting CSVReader/Reader/BinaryMarshaler interface", + ) + } + } + }) +} + +// pipeCSV copies CSV records from a CSV reader to a CSV writer +func pipeCSV(csvWriter CSVWriter, csvReader CSVReader, opts csvOpts) error { + for ; opts.skippedLines > 0; opts.skippedLines-- { + _, err := csvReader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + + return err + } + } + + for { + record, err := csvReader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + + return err + } + + if err := csvWriter.Write(record); err != nil { + return err + } + } + + csvWriter.Flush() + + return csvWriter.Error() +} + +// bufferedCSV copies CSV records from a CSV reader to a CSV writer, +// by first reading all records then writing them at once. 
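// [Editor's example] Both helpers honor the skip-lines option before copying any
// records. A sketch of combining it with reader options (defined in csv_options.go
// below) to parse semicolon-separated input; only exported csv.Reader fields are
// copied over:
//
//	consumer := CSVConsumer(
//		WithCSVReaderOpts(csv.Reader{Comma: ';'}), // use ';' as the separator
//		WithCSVSkipLines(1),                       // drop the header line
//	)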
+func bufferedCSV(csvWriter *csv.Writer, csvReader *csv.Reader, opts csvOpts) error { + for ; opts.skippedLines > 0; opts.skippedLines-- { + _, err := csvReader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + + return err + } + } + + records, err := csvReader.ReadAll() + if err != nil { + return err + } + + return csvWriter.WriteAll(records) +} diff --git a/vendor/github.com/go-openapi/runtime/csv_options.go b/vendor/github.com/go-openapi/runtime/csv_options.go new file mode 100644 index 00000000000..4cc04390010 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/csv_options.go @@ -0,0 +1,124 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "encoding/csv" + "io" +) + +// CSVOpt alters the behavior of the CSV consumer or producer. +type CSVOpt func(*csvOpts) + +type csvOpts struct { + csvReader csv.Reader + csvWriter csv.Writer + skippedLines int + closeStream bool +} + +// WithCSVReaderOpts specifies the options to csv.Reader +// when reading CSV. +func WithCSVReaderOpts(reader csv.Reader) CSVOpt { + return func(o *csvOpts) { + o.csvReader = reader + } +} + +// WithCSVWriterOpts specifies the options to csv.Writer +// when writing CSV. +func WithCSVWriterOpts(writer csv.Writer) CSVOpt { + return func(o *csvOpts) { + o.csvWriter = writer + } +} + +// WithCSVSkipLines will skip header lines. +func WithCSVSkipLines(skipped int) CSVOpt { + return func(o *csvOpts) { + o.skippedLines = skipped + } +} + +// WithCSVClosesStream closes the underlying reader or writer (when it implements io.Closer) once the consumer or producer is done. +func WithCSVClosesStream() CSVOpt { + return func(o *csvOpts) { + o.closeStream = true + } +} + +func (o csvOpts) applyToReader(in *csv.Reader) { + if o.csvReader.Comma != 0 { + in.Comma = o.csvReader.Comma + } + if o.csvReader.Comment != 0 { + in.Comment = o.csvReader.Comment + } + if o.csvReader.FieldsPerRecord != 0 { + in.FieldsPerRecord = o.csvReader.FieldsPerRecord + } + + in.LazyQuotes = o.csvReader.LazyQuotes + in.TrimLeadingSpace = o.csvReader.TrimLeadingSpace + in.ReuseRecord = o.csvReader.ReuseRecord +} + +func (o csvOpts) applyToWriter(in *csv.Writer) { + if o.csvWriter.Comma != 0 { + in.Comma = o.csvWriter.Comma + } + in.UseCRLF = o.csvWriter.UseCRLF +} + +func csvOptsWithDefaults(opts []CSVOpt) csvOpts { + var o csvOpts + for _, apply := range opts { + apply(&o) + } + + return o +} + +// CSVWriter is the record-writing subset of csv.Writer; *csv.Writer implements it. +type CSVWriter interface { + Write([]string) error + Flush() + Error() error +} + +// CSVReader is the record-reading subset of csv.Reader; *csv.Reader implements it. +type CSVReader interface { + Read() ([]string, error) +} + +var ( + _ CSVWriter = &csvRecordsWriter{} + _ CSVReader = &csvRecordsWriter{} +) + +// csvRecordsWriter is an internal container to move CSV records back and forth +type csvRecordsWriter struct { + i int + records [][]string +} + +func (w *csvRecordsWriter) Write(record []string) error { + w.records = append(w.records, record) + + return nil +} + +func (w *csvRecordsWriter) Read() ([]string, error) { + if w.i >= len(w.records) { + return nil, io.EOF + } + defer func() { + w.i++ + }() + + return w.records[w.i], nil +} + +func (w *csvRecordsWriter) Flush() {} + +func (w *csvRecordsWriter) Error() error { + return nil +} diff --git a/vendor/github.com/go-openapi/runtime/discard.go b/vendor/github.com/go-openapi/runtime/discard.go new file mode 100644 index 00000000000..b05678becd9 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/discard.go @@ -0,0 +1,12 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import "io" + +// DiscardConsumer does
absolutely nothing, it's a black hole. +var DiscardConsumer = ConsumerFunc(func(_ io.Reader, _ any) error { return nil }) + +// DiscardProducer does absolutely nothing, it's a black hole. +var DiscardProducer = ProducerFunc(func(_ io.Writer, _ any) error { return nil }) diff --git a/vendor/github.com/go-openapi/runtime/file.go b/vendor/github.com/go-openapi/runtime/file.go new file mode 100644 index 00000000000..2a85379a748 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/file.go @@ -0,0 +1,8 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import "github.com/go-openapi/swag/fileutils" + +type File = fileutils.File diff --git a/vendor/github.com/go-openapi/runtime/go.work b/vendor/github.com/go-openapi/runtime/go.work new file mode 100644 index 00000000000..b4cd9e01e86 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/go.work @@ -0,0 +1,6 @@ +use ( + . + ./client-middleware/opentracing +) + +go 1.24.0 diff --git a/vendor/github.com/go-openapi/runtime/go.work.sum b/vendor/github.com/go-openapi/runtime/go.work.sum new file mode 100644 index 00000000000..b0c2c9a63de --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/go.work.sum @@ -0,0 +1,93 @@ +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/go-openapi/errors v0.22.2/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= +github.com/go-openapi/jsonpointer v0.22.0/go.mod h1:xt3jV88UtExdIkkL7NloURjRQjbeUgcxFblMjq2iaiU= +github.com/go-openapi/jsonreference v0.21.1/go.mod h1:PWs8rO4xxTUqKGu+lEvvCxD5k2X7QYkKAepJyCmSTT8= +github.com/go-openapi/swag v0.24.1/go.mod h1:sm8I3lCPlspsBBwUm1t5oZeWZS0s7m/A+Psg0ooRU0A= +github.com/go-openapi/swag/cmdutils v0.24.0/go.mod h1:uxib2FAeQMByyHomTlsP8h1TtPd54Msu2ZDU/H5Vuf8= +github.com/go-openapi/swag/conv v0.24.0/go.mod h1:jbn140mZd7EW2g8a8Y5bwm8/Wy1slLySQQ0ND6DPc2c= +github.com/go-openapi/swag/fileutils v0.24.0/go.mod h1:3SCrCSBHyP1/N+3oErQ1gP+OX1GV2QYFSnrTbzwli90= +github.com/go-openapi/swag/jsonname v0.24.0/go.mod h1:GXqrPzGJe611P7LG4QB9JKPtUZ7flE4DOVechNaDd7Q= +github.com/go-openapi/swag/jsonutils v0.24.0/go.mod h1:vBowZtF5Z4DDApIoxcIVfR8v0l9oq5PpYRUuteVu6f0= +github.com/go-openapi/swag/loading v0.24.0/go.mod h1:gShCN4woKZYIxPxbfbyHgjXAhO61m88tmjy0lp/LkJk= +github.com/go-openapi/swag/mangling v0.24.0/go.mod h1:Jm5Go9LHkycsz0wfoaBDkdc4CkpuSnIEf62brzyCbhc= +github.com/go-openapi/swag/netutils v0.24.0/go.mod h1:WRgiHcYTnx+IqfMCtu0hy9oOaPR0HnPbmArSRN1SkZM= +github.com/go-openapi/swag/stringutils v0.24.0/go.mod h1:5nUXB4xA0kw2df5PRipZDslPJgJut+NjL7D25zPZ/4w= +github.com/go-openapi/swag/typeutils v0.24.0/go.mod h1:q8C3Kmk/vh2VhpCLaoR2MVWOGP8y7Jc8l82qCTd1DYI= +github.com/go-openapi/swag/yamlutils v0.24.0/go.mod h1:DpKv5aYuaGm/sULePoeiG8uwMpZSfReo1HR3Ik0yaG8= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= +golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488/go.mod h1:fGb/2+tgXXjhjHsTNdVEEMZNWA0quBnfrO+AfoDSAKw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= +golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/go-openapi/runtime/headers.go b/vendor/github.com/go-openapi/runtime/headers.go new file mode 100644 index 00000000000..510e396ca73 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/headers.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "mime" + "net/http" + + "github.com/go-openapi/errors" +) + +// ContentType parses a content type header +func ContentType(headers http.Header) (string, string, 
error) { + ct := headers.Get(HeaderContentType) + orig := ct + if ct == "" { + ct = DefaultMime + } + if ct == "" { + return "", "", nil + } + + mt, opts, err := mime.ParseMediaType(ct) + if err != nil { + return "", "", errors.NewParseError(HeaderContentType, "header", orig, err) + } + + if cs, ok := opts[charsetKey]; ok { + return mt, cs, nil + } + + return mt, "", nil +} diff --git a/vendor/github.com/go-openapi/runtime/interfaces.go b/vendor/github.com/go-openapi/runtime/interfaces.go new file mode 100644 index 00000000000..90046bf367e --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/interfaces.go @@ -0,0 +1,101 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "context" + "io" + "net/http" + + "github.com/go-openapi/strfmt" +) + +// OperationHandlerFunc an adapter for a function to the OperationHandler interface +type OperationHandlerFunc func(any) (any, error) + +// Handle implements the operation handler interface +func (s OperationHandlerFunc) Handle(data any) (any, error) { + return s(data) +} + +// OperationHandler a handler for a swagger operation +type OperationHandler interface { + Handle(any) (any, error) +} + +// ConsumerFunc represents a function that can be used as a consumer +type ConsumerFunc func(io.Reader, any) error + +// Consume consumes the reader into the data parameter +func (fn ConsumerFunc) Consume(reader io.Reader, data any) error { + return fn(reader, data) +} + +// Consumer implementations know how to bind the values on the provided interface to +// data provided by the request body +type Consumer interface { + // Consume performs the binding of request values + Consume(io.Reader, any) error +} + +// ProducerFunc represents a function that can be used as a producer +type ProducerFunc func(io.Writer, any) error + +// Produce produces the response for the provided data +func (f ProducerFunc) Produce(writer io.Writer, data any) error { + return f(writer, data) +} + +// Producer implementations know how to turn the provided interface into a valid +// HTTP response +type Producer interface { + // Produce writes to the http response + Produce(io.Writer, any) error +} + +// AuthenticatorFunc turns a function into an authenticator +type AuthenticatorFunc func(any) (bool, any, error) + +// Authenticate authenticates the request with the provided data +func (f AuthenticatorFunc) Authenticate(params any) (bool, any, error) { + return f(params) +} + +// Authenticator represents an authentication strategy +// implementations of Authenticator know how to authenticate the +// request data and translate that into a valid principal object or an error +type Authenticator interface { + Authenticate(any) (bool, any, error) +} + +// AuthorizerFunc turns a function into an authorizer +type AuthorizerFunc func(*http.Request, any) error + +// Authorize authorizes the processing of the request for the principal +func (f AuthorizerFunc) Authorize(r *http.Request, principal any) error { + return f(r, principal) +} + +// Authorizer represents an authorization strategy +// implementations of Authorizer know how to authorize the principal object +// using the request data and returns error if unauthorized +type Authorizer interface { + Authorize(*http.Request, any) error +} + +// Validatable types implementing this interface allow customizing their validation +// this will be used instead of the reflective validation based on the spec document. 
+// the implementations are assumed to have been generated by the swagger tool so they should +// contain all the validations obtained from the spec +type Validatable interface { + Validate(strfmt.Registry) error +} + +// ContextValidatable types implementing this interface allow customizing their validation +// this will be used instead of the reflective validation based on the spec document. +// the implementations are assumed to have been generated by the swagger tool so they should +// contain all the context validations obtained from the spec +type ContextValidatable interface { + ContextValidate(context.Context, strfmt.Registry) error +} diff --git a/vendor/github.com/go-openapi/runtime/json.go b/vendor/github.com/go-openapi/runtime/json.go new file mode 100644 index 00000000000..8f93eebfaa2 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/json.go @@ -0,0 +1,27 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONConsumer creates a new JSON consumer +func JSONConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data any) error { + dec := json.NewDecoder(reader) + dec.UseNumber() // preserve number formats + return dec.Decode(data) + }) +} + +// JSONProducer creates a new JSON producer +func JSONProducer() Producer { + return ProducerFunc(func(writer io.Writer, data any) error { + enc := json.NewEncoder(writer) + enc.SetEscapeHTML(false) + return enc.Encode(data) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/logger/logger.go b/vendor/github.com/go-openapi/runtime/logger/logger.go new file mode 100644 index 00000000000..45484deb593 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/logger/logger.go @@ -0,0 +1,23 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package logger + +import "os" + +type Logger interface { + Printf(format string, args ...any) + Debugf(format string, args ...any) +} + +func DebugEnabled() bool { + d := os.Getenv("SWAGGER_DEBUG") + if d != "" && d != "false" && d != "0" { + return true + } + d = os.Getenv("DEBUG") + if d != "" && d != "false" && d != "0" { + return true + } + return false +} diff --git a/vendor/github.com/go-openapi/runtime/logger/standard.go b/vendor/github.com/go-openapi/runtime/logger/standard.go new file mode 100644 index 00000000000..48ba27f4a3d --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/logger/standard.go @@ -0,0 +1,27 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package logger + +import ( + "fmt" + "os" +) + +var _ Logger = StandardLogger{} + +type StandardLogger struct{} + +func (StandardLogger) Printf(format string, args ...any) { + if len(format) == 0 || format[len(format)-1] != '\n' { + format += "\n" + } + fmt.Fprintf(os.Stderr, format, args...) +} + +func (StandardLogger) Debugf(format string, args ...any) { + if len(format) == 0 || format[len(format)-1] != '\n' { + format += "\n" + } + fmt.Fprintf(os.Stderr, format, args...) 
+} diff --git a/vendor/github.com/go-openapi/runtime/middleware/context.go b/vendor/github.com/go-openapi/runtime/middleware/context.go new file mode 100644 index 00000000000..bb00b93b89b --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/context.go @@ -0,0 +1,714 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + stdContext "context" + "fmt" + "net/http" + "net/url" + "path" + "strings" + "sync" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/logger" + "github.com/go-openapi/runtime/middleware/untyped" + "github.com/go-openapi/runtime/security" +) + +// Debug when true turns on verbose logging +var Debug = logger.DebugEnabled() + +// Logger is the standard library logger used for printing debug messages +var Logger logger.Logger = logger.StandardLogger{} + +func debugLogfFunc(lg logger.Logger) func(string, ...any) { + if logger.DebugEnabled() { + if lg == nil { + return Logger.Debugf + } + + return lg.Debugf + } + + // muted logger + return func(_ string, _ ...any) {} +} + +// A Builder can create middlewares +type Builder func(http.Handler) http.Handler + +// PassthroughBuilder returns the handler, aka the builder identity function +func PassthroughBuilder(handler http.Handler) http.Handler { return handler } + +// RequestBinder is an interface for types to implement +// when they want to be able to bind from a request +type RequestBinder interface { + BindRequest(*http.Request, *MatchedRoute) error +} + +// Responder is an interface for types to implement +// when they want to be considered for writing HTTP responses +type Responder interface { + WriteResponse(http.ResponseWriter, runtime.Producer) +} + +// ResponderFunc wraps a func as a Responder interface +type ResponderFunc func(http.ResponseWriter, runtime.Producer) + +// WriteResponse writes to the response +func (fn ResponderFunc) WriteResponse(rw http.ResponseWriter, pr runtime.Producer) { + fn(rw, pr) +} + +// Context is a type safe wrapper around an untyped request context +// used throughout to store request context with the standard context attached +// to the http.Request +type Context struct { + spec *loads.Document + analyzer *analysis.Spec + api RoutableAPI + router Router + debugLogf func(string, ...any) // a logging function to debug context and all components using it +} + +type routableUntypedAPI struct { + api *untyped.API + hlock *sync.Mutex + handlers map[string]map[string]http.Handler + defaultConsumes string + defaultProduces string +} + +func newRoutableUntypedAPI(spec *loads.Document, api *untyped.API, context *Context) *routableUntypedAPI { + var handlers map[string]map[string]http.Handler + if spec == nil || api == nil { + return nil + } + analyzer := analysis.New(spec.Spec()) + for method, hls := range analyzer.Operations() { + um := strings.ToUpper(method) + for path, op := range hls { + schemes := analyzer.SecurityRequirementsFor(op) + + if oh, ok := api.OperationHandlerFor(method, path); ok { + if handlers == nil { + handlers = make(map[string]map[string]http.Handler) + } + if b, ok := handlers[um]; !ok || b == nil { + handlers[um] = make(map[string]http.Handler) + } + + var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // lookup route info in the context + route,
rCtx, _ := context.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + + // bind and validate the request using reflection + var bound any + var validation error + bound, r, validation = context.BindAndValidate(r, route) + if validation != nil { + context.Respond(w, r, route.Produces, route, validation) + return + } + + // actually handle the request + result, err := oh.Handle(bound) + if err != nil { + // respond with failure + context.Respond(w, r, route.Produces, route, err) + return + } + + // respond with success + context.Respond(w, r, route.Produces, route, result) + }) + + if len(schemes) > 0 { + handler = newSecureAPI(context, handler) + } + handlers[um][path] = handler + } + } + } + + return &routableUntypedAPI{ + api: api, + hlock: new(sync.Mutex), + handlers: handlers, + defaultProduces: api.DefaultProduces, + defaultConsumes: api.DefaultConsumes, + } +} + +func (r *routableUntypedAPI) HandlerFor(method, path string) (http.Handler, bool) { + r.hlock.Lock() + paths, ok := r.handlers[strings.ToUpper(method)] + if !ok { + r.hlock.Unlock() + return nil, false + } + handler, ok := paths[path] + r.hlock.Unlock() + return handler, ok +} +func (r *routableUntypedAPI) ServeErrorFor(_ string) func(http.ResponseWriter, *http.Request, error) { + return r.api.ServeError +} +func (r *routableUntypedAPI) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { + return r.api.ConsumersFor(mediaTypes) +} +func (r *routableUntypedAPI) ProducersFor(mediaTypes []string) map[string]runtime.Producer { + return r.api.ProducersFor(mediaTypes) +} +func (r *routableUntypedAPI) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { + return r.api.AuthenticatorsFor(schemes) +} +func (r *routableUntypedAPI) Authorizer() runtime.Authorizer { + return r.api.Authorizer() +} +func (r *routableUntypedAPI) Formats() strfmt.Registry { + return r.api.Formats() +} + +func (r *routableUntypedAPI) DefaultProduces() string { + return r.defaultProduces +} + +func (r *routableUntypedAPI) DefaultConsumes() string { + return r.defaultConsumes +} + +// NewRoutableContext creates a new context for a routable API. +// +// If a nil Router is provided, the DefaultRouter (denco-based) will be used. +func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Router) *Context { + var an *analysis.Spec + if spec != nil { + an = analysis.New(spec.Spec()) + } + + return NewRoutableContextWithAnalyzedSpec(spec, an, routableAPI, routes) +} + +// NewRoutableContextWithAnalyzedSpec is like NewRoutableContext but takes as input an already analysed spec. +// +// If a nil Router is provided, the DefaultRouter (denco-based) will be used. +func NewRoutableContextWithAnalyzedSpec(spec *loads.Document, an *analysis.Spec, routableAPI RoutableAPI, routes Router) *Context { + // Either there are no spec doc and analysis, or both of them. + if (spec != nil || an != nil) && (spec == nil || an == nil) { + panic(fmt.Errorf("%d: %s", http.StatusInternalServerError, "routable context requires either both spec doc and analysis, or none of them")) + } + + return &Context{ + spec: spec, + api: routableAPI, + analyzer: an, + router: routes, + debugLogf: debugLogfFunc(nil), + } +} + +// NewContext creates a new context wrapper. +// +// If a nil Router is provided, the DefaultRouter (denco-based) will be used. 
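// [Editor's example] A hypothetical end-to-end sketch tying the context machinery
// to Serve (defined just below); the spec path and listen address are illustrative:
//
//	doc, err := loads.Spec("./swagger.json")
//	if err != nil {
//		log.Fatal(err)
//	}
//	api := untyped.NewAPI(doc) // register operation handlers on api before serving
//	handler := middleware.Serve(doc, api)
//	log.Fatal(http.ListenAndServe(":8080", handler))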
+func NewContext(spec *loads.Document, api *untyped.API, routes Router) *Context { + var an *analysis.Spec + if spec != nil { + an = analysis.New(spec.Spec()) + } + ctx := &Context{ + spec: spec, + analyzer: an, + router: routes, + debugLogf: debugLogfFunc(nil), + } + ctx.api = newRoutableUntypedAPI(spec, api, ctx) + + return ctx +} + +// Serve serves the specified spec with the specified api registrations as a http.Handler +func Serve(spec *loads.Document, api *untyped.API) http.Handler { + return ServeWithBuilder(spec, api, PassthroughBuilder) +} + +// ServeWithBuilder serves the specified spec with the specified api registrations as a http.Handler that is decorated +// by the Builder +func ServeWithBuilder(spec *loads.Document, api *untyped.API, builder Builder) http.Handler { + context := NewContext(spec, api, nil) + return context.APIHandler(builder) +} + +type contextKey int8 + +const ( + _ contextKey = iota + ctxContentType + ctxResponseFormat + ctxMatchedRoute + ctxBoundParams + ctxSecurityPrincipal + ctxSecurityScopes +) + +// MatchedRouteFrom request context value. +func MatchedRouteFrom(req *http.Request) *MatchedRoute { + mr := req.Context().Value(ctxMatchedRoute) + if mr == nil { + return nil + } + if res, ok := mr.(*MatchedRoute); ok { + return res + } + return nil +} + +// SecurityPrincipalFrom request context value. +func SecurityPrincipalFrom(req *http.Request) any { + return req.Context().Value(ctxSecurityPrincipal) +} + +// SecurityScopesFrom request context value. +func SecurityScopesFrom(req *http.Request) []string { + rs := req.Context().Value(ctxSecurityScopes) + if res, ok := rs.([]string); ok { + return res + } + return nil +} + +type contentTypeValue struct { + MediaType string + Charset string +} + +// BasePath returns the base path for this API +func (c *Context) BasePath() string { + if c.spec == nil { + return "" + } + return c.spec.BasePath() +} + +// SetLogger allows for injecting a logger to catch debug entries. +// +// The logger is enabled in DEBUG mode only. +func (c *Context) SetLogger(lg logger.Logger) { + c.debugLogf = debugLogfFunc(lg) +} + +// RequiredProduces returns the accepted content types for responses +func (c *Context) RequiredProduces() []string { + return c.analyzer.RequiredProduces() +} + +// BindValidRequest binds a params object to a request but only when the request is valid +// if the request is not valid an error will be returned +func (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, binder RequestBinder) error { + var res []error + var requestContentType string + + // check and validate content type, select consumer + if runtime.HasBody(request) { + ct, _, err := runtime.ContentType(request.Header) + if err != nil { + res = append(res, err) + } else { + c.debugLogf("validating content type for %q against [%s]", ct, strings.Join(route.Consumes, ", ")) + if err := validateContentType(route.Consumes, ct); err != nil { + res = append(res, err) + } + if len(res) == 0 { + cons, ok := route.Consumers[ct] + if !ok { + res = append(res, errors.New(http.StatusInternalServerError, "no consumer registered for %s", ct)) + } else { + route.Consumer = cons + requestContentType = ct + } + } + } + } + + // check and validate the response format + if len(res) == 0 { + // if the route does not provide Produces and a default contentType could not be identified + // based on a body, typical for GET and DELETE requests, then default contentType to */*.
+ if len(route.Produces) == 0 && requestContentType == "" { + requestContentType = "*/*" + } + + if str := NegotiateContentType(request, route.Produces, requestContentType); str == "" { + res = append(res, errors.InvalidResponseFormat(request.Header.Get(runtime.HeaderAccept), route.Produces)) + } + } + + // now bind the request with the provided binder + // it's assumed the binder will also validate the request and return an error if the + // request is invalid + if binder != nil && len(res) == 0 { + if err := binder.BindRequest(request, route); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContentType gets the parsed value of a content type +// Returns the media type, its charset and a shallow copy of the request +// when its context doesn't contain the content type value, otherwise it returns +// the same request +// Returns any error that runtime.ContentType may return. +func (c *Context) ContentType(request *http.Request) (string, string, *http.Request, error) { + var rCtx = request.Context() + + if v, ok := rCtx.Value(ctxContentType).(*contentTypeValue); ok { + return v.MediaType, v.Charset, request, nil + } + + mt, cs, err := runtime.ContentType(request.Header) + if err != nil { + return "", "", nil, err + } + rCtx = stdContext.WithValue(rCtx, ctxContentType, &contentTypeValue{mt, cs}) + return mt, cs, request.WithContext(rCtx), nil +} + +// LookupRoute looks a route up and returns true when it is found +func (c *Context) LookupRoute(request *http.Request) (*MatchedRoute, bool) { + if route, ok := c.router.Lookup(request.Method, request.URL.EscapedPath()); ok { + return route, ok + } + return nil, false +} + +// RouteInfo tries to match a route for this request +// Returns the matched route, a shallow copy of the request if its context +// contains the matched route, otherwise the same request, and a bool to +// indicate if the request matches one of the routes; if it doesn't, +// then it returns false and nil for the other two return values +func (c *Context) RouteInfo(request *http.Request) (*MatchedRoute, *http.Request, bool) { + var rCtx = request.Context() + + if v, ok := rCtx.Value(ctxMatchedRoute).(*MatchedRoute); ok { + return v, request, ok + } + + if route, ok := c.LookupRoute(request); ok { + rCtx = stdContext.WithValue(rCtx, ctxMatchedRoute, route) + return route, request.WithContext(rCtx), ok + } + + return nil, nil, false +} + +// ResponseFormat negotiates the response content type +// Returns the response format and a shallow copy of the request if its context +// doesn't contain the response format, otherwise the same request +func (c *Context) ResponseFormat(r *http.Request, offers []string) (string, *http.Request) { + var rCtx = r.Context() + + if v, ok := rCtx.Value(ctxResponseFormat).(string); ok { + c.debugLogf("[%s %s] found response format %q in context", r.Method, r.URL.Path, v) + return v, r + } + + format := NegotiateContentType(r, offers, "") + if format != "" { + c.debugLogf("[%s %s] set response format %q in context", r.Method, r.URL.Path, format) + r = r.WithContext(stdContext.WithValue(rCtx, ctxResponseFormat, format)) + } + c.debugLogf("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format) + return format, r +} + +// AllowedMethods gets the allowed methods for the path of this request +func (c *Context) AllowedMethods(request *http.Request) []string { + return c.router.OtherMethods(request.Method, request.URL.EscapedPath()) +} + +// ResetAuth
+func (c *Context) ResetAuth(request *http.Request) *http.Request {
+	rctx := request.Context()
+	rctx = stdContext.WithValue(rctx, ctxSecurityPrincipal, nil)
+	rctx = stdContext.WithValue(rctx, ctxSecurityScopes, nil)
+	return request.WithContext(rctx)
+}
+
+// Authorize authorizes the request.
+// Returns the principal object and a shallow copy of the request when its
+// context doesn't contain the principal (otherwise the same request), or an error
+// when one of the authenticators returns one or the request is unauthenticated
+func (c *Context) Authorize(request *http.Request, route *MatchedRoute) (any, *http.Request, error) {
+	if route == nil || !route.HasAuth() {
+		return nil, nil, nil
+	}
+
+	var rCtx = request.Context()
+	if v := rCtx.Value(ctxSecurityPrincipal); v != nil {
+		return v, request, nil
+	}
+
+	applies, usr, err := route.Authenticators.Authenticate(request, route)
+	if !applies || err != nil || !route.Authenticators.AllowsAnonymous() && usr == nil {
+		if err != nil {
+			return nil, nil, err
+		}
+		return nil, nil, errors.Unauthenticated("invalid credentials")
+	}
+	if route.Authorizer != nil {
+		if err := route.Authorizer.Authorize(request, usr); err != nil {
+			if _, ok := err.(errors.Error); ok {
+				return nil, nil, err
+			}
+
+			return nil, nil, errors.New(http.StatusForbidden, "%v", err)
+		}
+	}
+
+	rCtx = request.Context()
+
+	rCtx = stdContext.WithValue(rCtx, ctxSecurityPrincipal, usr)
+	rCtx = stdContext.WithValue(rCtx, ctxSecurityScopes, route.Authenticator.AllScopes())
+	return usr, request.WithContext(rCtx), nil
+}
+
+// BindAndValidate binds and validates the request.
+// Returns the bound data and a shallow copy of the request when its context
+// doesn't contain the validation result (otherwise it returns the same request), or a
+// CompositeValidationError
+func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute) (any, *http.Request, error) {
+	var rCtx = request.Context()
+
+	if v, ok := rCtx.Value(ctxBoundParams).(*validation); ok {
+		c.debugLogf("got cached validation (valid: %t)", len(v.result) == 0)
+		if len(v.result) > 0 {
+			return v.bound, request, errors.CompositeValidationError(v.result...)
+		}
+		return v.bound, request, nil
+	}
+	result := validateRequest(c, request, matched)
+	rCtx = stdContext.WithValue(rCtx, ctxBoundParams, result)
+	request = request.WithContext(rCtx)
+	if len(result.result) > 0 {
+		return result.bound, request, errors.CompositeValidationError(result.result...)
+	}
+	c.debugLogf("no validation errors found")
+	return result.bound, request, nil
+}
+
+// NotFound is the default not-found responder, used when no route has been matched yet
+func (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) {
+	c.Respond(rw, r, []string{c.api.DefaultProduces()}, nil, errors.NotFound("not found"))
+}
+
+// Respond renders the response after doing some content negotiation
+func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data any) {
+	c.debugLogf("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces)
+	offers := []string{}
+	for _, mt := range produces {
+		if mt != c.api.DefaultProduces() {
+			offers = append(offers, mt)
+		}
+	}
+	// the default producer is last so more specific producers take precedence
+	offers = append(offers, c.api.DefaultProduces())
+	c.debugLogf("offers: %v", offers)
+
+	var format string
+	format, r = c.ResponseFormat(r, offers)
+	rw.Header().Set(runtime.HeaderContentType, format)
+
+	if resp, ok := data.(Responder); ok {
+		producers := route.Producers
+		// producers contains keys with a normalized format: if a format has a MIME type parameter,
+		// such as `text/plain; charset=utf-8`, then `text/plain` must be provided to get the
+		// correct producer. However, format here is not normalized.
+		prod, ok := producers[normalizeOffer(format)]
+		if !ok {
+			prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()}))
+			pr, ok := prods[c.api.DefaultProduces()]
+			if !ok {
+				panic(fmt.Errorf("%d: %s", http.StatusInternalServerError, cantFindProducer(format)))
+			}
+			prod = pr
+		}
+		resp.WriteResponse(rw, prod)
+		return
+	}
+
+	if err, ok := data.(error); ok {
+		if format == "" {
+			rw.Header().Set(runtime.HeaderContentType, runtime.JSONMime)
+		}
+
+		if realm := security.FailedBasicAuth(r); realm != "" {
+			rw.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", realm))
+		}
+
+		if route == nil || route.Operation == nil {
+			c.api.ServeErrorFor("")(rw, r, err)
+			return
+		}
+		c.api.ServeErrorFor(route.Operation.ID)(rw, r, err)
+		return
+	}
+
+	if route == nil || route.Operation == nil {
+		rw.WriteHeader(http.StatusOK)
+		if r.Method == http.MethodHead {
+			return
+		}
+		producers := c.api.ProducersFor(normalizeOffers(offers))
+		prod, ok := producers[format]
+		if !ok {
+			panic(fmt.Errorf("%d: %s", http.StatusInternalServerError, cantFindProducer(format)))
+		}
+		if err := prod.Produce(rw, data); err != nil {
+			panic(err) // let the recovery middleware deal with this
+		}
+		return
+	}
+
+	if _, code, ok := route.Operation.SuccessResponse(); ok {
+		rw.WriteHeader(code)
+		if code == http.StatusNoContent || r.Method == http.MethodHead {
+			return
+		}
+
+		producers := route.Producers
+		prod, ok := producers[format]
+		if !ok {
+			prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()}))
+			pr, ok := prods[c.api.DefaultProduces()]
+			if !ok {
+				panic(fmt.Errorf("%d: %s", http.StatusInternalServerError, cantFindProducer(format)))
+			}
+			prod = pr
+		}
+		if err := prod.Produce(rw, data); err != nil {
+			panic(err) // let the recovery middleware deal with this
+		}
+		return
+	}
+
+	c.api.ServeErrorFor(route.Operation.ID)(rw, r, fmt.Errorf("%d: %s", http.StatusInternalServerError, "can't produce response"))
+}
+
+// APIHandlerSwaggerUI returns a handler to serve the API.
+//
+// This handler includes a swagger spec, router and the contract defined in the swagger spec.
+// +// A spec UI (SwaggerUI) is served at {API base path}/docs and the spec document at /swagger.json +// (these can be modified with uiOptions). +func (c *Context) APIHandlerSwaggerUI(builder Builder, opts ...UIOption) http.Handler { + b := builder + if b == nil { + b = PassthroughBuilder + } + + specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts) + var swaggerUIOpts SwaggerUIOpts + fromCommonToAnyOptions(uiOpts, &swaggerUIOpts) + + return Spec(specPath, c.spec.Raw(), SwaggerUI(swaggerUIOpts, c.RoutesHandler(b)), specOpts...) +} + +// APIHandlerRapiDoc returns a handler to serve the API. +// +// This handler includes a swagger spec, router and the contract defined in the swagger spec. +// +// A spec UI (RapiDoc) is served at {API base path}/docs and the spec document at /swagger.json +// (these can be modified with uiOptions). +func (c *Context) APIHandlerRapiDoc(builder Builder, opts ...UIOption) http.Handler { + b := builder + if b == nil { + b = PassthroughBuilder + } + + specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts) + var rapidocUIOpts RapiDocOpts + fromCommonToAnyOptions(uiOpts, &rapidocUIOpts) + + return Spec(specPath, c.spec.Raw(), RapiDoc(rapidocUIOpts, c.RoutesHandler(b)), specOpts...) +} + +// APIHandler returns a handler to serve the API. +// +// This handler includes a swagger spec, router and the contract defined in the swagger spec. +// +// A spec UI (Redoc) is served at {API base path}/docs and the spec document at /swagger.json +// (these can be modified with uiOptions). +func (c *Context) APIHandler(builder Builder, opts ...UIOption) http.Handler { + b := builder + if b == nil { + b = PassthroughBuilder + } + + specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts) + var redocOpts RedocOpts + fromCommonToAnyOptions(uiOpts, &redocOpts) + + return Spec(specPath, c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b)), specOpts...) +} + +// RoutesHandler returns a handler to serve the API, just the routes and the contract defined in the swagger spec +func (c *Context) RoutesHandler(builder Builder) http.Handler { + b := builder + if b == nil { + b = PassthroughBuilder + } + return NewRouter(c, b(NewOperationExecutor(c))) +} + +func (c Context) uiOptionsForHandler(opts []UIOption) (string, uiOptions, []SpecOption) { + var title string + sp := c.spec.Spec() + if sp != nil && sp.Info != nil && sp.Info.Title != "" { + title = sp.Info.Title + } + + // default options (may be overridden) + optsForContext := []UIOption{ + WithUIBasePath(c.BasePath()), + WithUITitle(title), + } + optsForContext = append(optsForContext, opts...) + uiOpts := uiOptionsWithDefaults(optsForContext) + + // If spec URL is provided, there is a non-default path to serve the spec. + // This makes sure that the UI middleware is aligned with the Spec middleware. + u, _ := url.Parse(uiOpts.SpecURL) + var specPath string + if u != nil { + specPath = u.Path + } + + pth, doc := path.Split(specPath) + if pth == "." 
{ + pth = "" + } + + return pth, uiOpts, []SpecOption{WithSpecDocument(doc)} +} + +func cantFindProducer(format string) string { + return "can't find a producer for " + format +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE b/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE new file mode 100644 index 00000000000..e65039ad84c --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014 Naoya Inada + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/README.md b/vendor/github.com/go-openapi/runtime/middleware/denco/README.md new file mode 100644 index 00000000000..30109e17d5e --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/denco/README.md @@ -0,0 +1,180 @@ +# Denco [![Build Status](https://travis-ci.org/naoina/denco.png?branch=master)](https://travis-ci.org/naoina/denco) + +The fast and flexible HTTP request router for [Go](http://golang.org). + +Denco is based on Double-Array implementation of [Kocha-urlrouter](https://github.com/naoina/kocha-urlrouter). +However, Denco is optimized and some features added. 
+
+## Features
+
+* Fast (See [go-http-routing-benchmark](https://github.com/naoina/go-http-routing-benchmark))
+* [URL patterns](#url-patterns) (`/foo/:bar` and `/foo/*wildcard`)
+* Small (but enough) URL router API
+* HTTP request multiplexer like `http.ServeMux`
+
+## Installation
+
+    go get -u github.com/go-openapi/runtime/middleware/denco
+
+## Using as HTTP request multiplexer
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+
+	"github.com/go-openapi/runtime/middleware/denco"
+)
+
+func Index(w http.ResponseWriter, r *http.Request, params denco.Params) {
+	fmt.Fprintf(w, "Welcome to Denco!\n")
+}
+
+func User(w http.ResponseWriter, r *http.Request, params denco.Params) {
+	fmt.Fprintf(w, "Hello %s!\n", params.Get("name"))
+}
+
+func main() {
+	mux := denco.NewMux()
+	handler, err := mux.Build([]denco.Handler{
+		mux.GET("/", Index),
+		mux.GET("/user/:name", User),
+		mux.POST("/user/:name", User),
+	})
+	if err != nil {
+		panic(err)
+	}
+	log.Fatal(http.ListenAndServe(":8080", handler))
+}
+```
+
+## Using as URL router
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/go-openapi/runtime/middleware/denco"
+)
+
+type route struct {
+	name string
+}
+
+func main() {
+	router := denco.New()
+	router.Build([]denco.Record{
+		{"/", &route{"root"}},
+		{"/user/:id", &route{"user"}},
+		{"/user/:name/:id", &route{"username"}},
+		{"/static/*filepath", &route{"static"}},
+	})
+
+	data, params, found := router.Lookup("/")
+	// print `&main.route{name:"root"}, denco.Params(nil), true`.
+	fmt.Printf("%#v, %#v, %#v\n", data, params, found)
+
+	data, params, found = router.Lookup("/user/hoge")
+	// print `&main.route{name:"user"}, denco.Params{denco.Param{Name:"id", Value:"hoge"}}, true`.
+	fmt.Printf("%#v, %#v, %#v\n", data, params, found)
+
+	data, params, found = router.Lookup("/user/hoge/7")
+	// print `&main.route{name:"username"}, denco.Params{denco.Param{Name:"name", Value:"hoge"}, denco.Param{Name:"id", Value:"7"}}, true`.
+	fmt.Printf("%#v, %#v, %#v\n", data, params, found)
+
+	data, params, found = router.Lookup("/static/path/to/file")
+	// print `&main.route{name:"static"}, denco.Params{denco.Param{Name:"filepath", Value:"path/to/file"}}, true`.
+	fmt.Printf("%#v, %#v, %#v\n", data, params, found)
+}
+```
+
+See [Godoc](http://godoc.org/github.com/go-openapi/runtime/middleware/denco) for more details.
+
+## Getting the value of a path parameter
+
+You can get the value of a path parameter in two ways:
+
+1. Using the [`denco.Params.Get`](http://godoc.org/github.com/go-openapi/runtime/middleware/denco#Params.Get) method
+2. Finding it in a loop
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/go-openapi/runtime/middleware/denco"
+)
+
+func main() {
+	router := denco.New()
+	if err := router.Build([]denco.Record{
+		{"/user/:name/:id", "route1"},
+	}); err != nil {
+		panic(err)
+	}
+
+	// 1. Using the denco.Params.Get method.
+	_, params, _ := router.Lookup("/user/alice/1")
+	name := params.Get("name")
+	if name != "" {
+		fmt.Printf("Hello %s.\n", name) // prints "Hello alice.".
+	}
+
+	// 2. Finding it in a loop.
+	for _, param := range params {
+		if param.Name == "name" {
+			fmt.Printf("Hello %s.\n", param.Value) // prints "Hello alice.".
+		}
+	}
+}
+```
+
+## URL patterns
+
+Denco's route matching strategy is "most nearly matching".
+
+When the routes `/:name` and `/alice` have been built, the URI `/alice` matches the route `/alice`, not `/:name`,
+because the URI `/alice` matches the route `/alice` more closely than it matches `/:name`.
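+
+A minimal sketch of that rule (the record values here are arbitrary strings, used only for illustration):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/go-openapi/runtime/middleware/denco"
+)
+
+func main() {
+	router := denco.New()
+	if err := router.Build([]denco.Record{
+		{Key: "/:name", Value: "param route"},
+		{Key: "/alice", Value: "static route"},
+	}); err != nil {
+		panic(err)
+	}
+
+	// the static route wins for an exact match
+	data, _, _ := router.Lookup("/alice")
+	fmt.Println(data) // prints "static route"
+
+	// any other single segment falls back to the parameter route
+	data, params, _ := router.Lookup("/bob")
+	fmt.Println(data, params.Get("name")) // prints "param route bob"
+}
+```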
+
+For a longer example, when the routes below have been built:
+
+```
+/user/alice
+/user/:name
+/user/:name/:id
+/user/alice/:id
+/user/:id/bob
+```
+
+Route matching works as follows:
+
+```
+/user/alice      => "/user/alice" (no match with "/user/:name")
+/user/bob        => "/user/:name"
+/user/naoina/1   => "/user/:name/:id"
+/user/alice/1    => "/user/alice/:id" (no match with "/user/:name/:id")
+/user/1/bob      => "/user/:id/bob" (no match with "/user/:name/:id")
+/user/alice/bob  => "/user/alice/:id" (no match with "/user/:name/:id" and "/user/:id/bob")
+```
+
+## Limitations
+
+Denco has the following limitations:
+
+* The number of param records (such as `/:name`) must be less than 2^22
+* The number of elements of the internal slice must be less than 2^22
+
+## Benchmarks
+
+    cd $GOPATH/github.com/go-openapi/runtime/middleware/denco
+    go test -bench . -benchmem
+
+## License
+
+Denco is licensed under the MIT License.
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/router.go b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go
new file mode 100644
index 00000000000..b371a2cf84e
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go
@@ -0,0 +1,479 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+// Package denco provides a fast URL router.
+package denco
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+const (
+	// ParamCharacter is a special character for a path parameter.
+	ParamCharacter = ':'
+
+	// WildcardCharacter is a special character for a wildcard path parameter.
+	WildcardCharacter = '*'
+
+	// TerminationCharacter is a special character for the end of a path.
+	TerminationCharacter = '#'
+
+	// SeparatorCharacter separates path segments.
+	SeparatorCharacter = '/'
+
+	// PathParamCharacter indicates a RESTCONF path param
+	PathParamCharacter = '='
+
+	// MaxSize is the maximum size of records and of the internal slice.
+	MaxSize = (1 << 22) - 1 //nolint:mnd
+)
+
+// Router represents a URL router.
+type Router struct {
+	param *doubleArray
+	// SizeHint is the maximum number of path parameters expected in the records given to Build.
+	// SizeHint is used to determine the capacity of the memory to allocate.
+	// By default, SizeHint is determined from the records given to Build.
+	SizeHint int
+
+	static map[string]any
+}
+
+// New returns a new Router.
+func New() *Router {
+	return &Router{
+		SizeHint: -1,
+		static:   make(map[string]any),
+		param:    newDoubleArray(),
+	}
+}
+
+// Lookup returns the data and path parameters associated with path.
+// params is a slice of Param arranged in the order in which the parameters appeared.
+// e.g. when the built routing path is "/path/to/:id/:name" and the given path is "/path/to/1/alice",
+// the params order is [{"id": "1"}, {"name": "alice"}], not [{"name": "alice"}, {"id": "1"}].
+func (rt *Router) Lookup(path string) (data any, params Params, found bool) {
+	if data, found = rt.static[path]; found {
+		return data, nil, true
+	}
+	if len(rt.param.node) == 1 {
+		return nil, nil, false
+	}
+	nd, params, found := rt.param.lookup(path, make([]Param, 0, rt.SizeHint), 1)
+	if !found {
+		return nil, nil, false
+	}
+	for i := range params {
+		params[i].Name = nd.paramNames[i]
+	}
+	return nd.data, params, true
+}
+
+// Build builds the URL router from records.
+func (rt *Router) Build(records []Record) error {
+	statics, params := makeRecords(records)
+	if len(params) > MaxSize {
+		return errors.New("denco: too many records")
+	}
+	if rt.SizeHint < 0 {
+		rt.SizeHint = 0
+		for _, p := range params {
+			size := 0
+			for _, k := range p.Key {
+				if k == ParamCharacter || k == WildcardCharacter {
+					size++
+				}
+			}
+			if size > rt.SizeHint {
+				rt.SizeHint = size
+			}
+		}
+	}
+	for _, r := range statics {
+		rt.static[r.Key] = r.Value
+	}
+	if err := rt.param.build(params, 1, 0, make(map[int]struct{})); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Param represents the name and value of a path parameter.
+type Param struct {
+	Name  string
+	Value string
+}
+
+// Params represents the name and value of path parameters.
+type Params []Param
+
+// Get gets the first value associated with the given name.
+// If there are no values associated with the key, Get returns "".
+func (ps Params) Get(name string) string {
+	for _, p := range ps {
+		if p.Name == name {
+			return p.Value
+		}
+	}
+	return ""
+}
+
+type doubleArray struct {
+	bc   []baseCheck
+	node []*node
+}
+
+func newDoubleArray() *doubleArray {
+	return &doubleArray{
+		bc:   []baseCheck{0},
+		node: []*node{nil}, // The start index is adjusted to 1 because 0 is used as a mark of a non-existent node.
+	}
+}
+
+// baseCheck contains BASE, CHECK and Extra flags.
+// From the top, 22 bits of BASE, 2 bits of Extra flags and 8 bits of CHECK.
+//
+//	BASE (22bit) | Extra flags (2bit) | CHECK (8bit)
+//	|----------------------|--|--------|
+//	32                     10 8        0
+type baseCheck uint32
+
+const (
+	flagsBits = 10
+	checkBits = 8
+)
+
+func (bc baseCheck) Base() int {
+	return int(bc >> flagsBits)
+}
+
+func (bc *baseCheck) SetBase(base int) {
+	*bc |= baseCheck(base) << flagsBits //nolint:gosec // integer conversion is ok
+}
+
+func (bc baseCheck) Check() byte {
+	return byte(bc)
+}
+
+func (bc *baseCheck) SetCheck(check byte) {
+	*bc |= baseCheck(check)
+}
+
+func (bc baseCheck) IsEmpty() bool {
+	return bc&0xfffffcff == 0
+}
+
+func (bc baseCheck) IsSingleParam() bool {
+	return bc&paramTypeSingle == paramTypeSingle
+}
+
+func (bc baseCheck) IsWildcardParam() bool {
+	return bc&paramTypeWildcard == paramTypeWildcard
+}
+
+func (bc baseCheck) IsAnyParam() bool {
+	return bc&paramTypeAny != 0
+}
+
+func (bc *baseCheck) SetSingleParam() {
+	*bc |= (1 << checkBits)
+}
+
+func (bc *baseCheck) SetWildcardParam() {
+	*bc |= (1 << (checkBits + 1))
+}
+
+const (
+	paramTypeSingle   = 0x0100
+	paramTypeWildcard = 0x0200
+	paramTypeAny      = 0x0300
+
+	indexOffset = 32
+	indexMask   = uint64(0xffffffff)
+)
+
+func (da *doubleArray) lookup(path string, params []Param, idx int) (*node, []Param, bool) {
+	indices := make([]uint64, 0, 1)
+	for i := range len(path) {
+		if da.bc[idx].IsAnyParam() {
+			indices = append(indices, (uint64(i)<<indexOffset)|(uint64(idx)&indexMask))
+		}
+		c := path[i]
+		if idx = nextIndex(da.bc[idx].Base(), c); idx >= len(da.bc) || da.bc[idx].Check() != c {
+			goto BACKTRACKING
+		}
+	}
+	if next := nextIndex(da.bc[idx].Base(), TerminationCharacter); next < len(da.bc) && da.bc[next].Check() == TerminationCharacter {
+		return da.node[da.bc[next].Base()], params, true
+	}
+
+BACKTRACKING:
+	for j := len(indices) - 1; j >= 0; j-- {
+		i, idx := int(indices[j]>>indexOffset), int(indices[j]&indexMask) //nolint:gosec // integer conversion is okay
+		if da.bc[idx].IsSingleParam() {
+			nextIdx := nextIndex(da.bc[idx].Base(), ParamCharacter)
+			if nextIdx >= len(da.bc) {
+				break
+			}
+
+			next := NextSeparator(path, i)
+			nextParams := params
+			nextParams = append(nextParams, Param{Value: path[i:next]})
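+			// recurse into the parameter subtree, capturing the current segment as an
+			// anonymous value; parameter names are attached from the matched node's
+			// paramNames in Router.Lookup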
+			if nd, nextNextParams, found := da.lookup(path[next:], nextParams, nextIdx); found {
+				return nd, nextNextParams, true
+			}
+		}
+
+		if da.bc[idx].IsWildcardParam() {
+			nextIdx := nextIndex(da.bc[idx].Base(), WildcardCharacter)
+			nextParams := params
+			nextParams = append(nextParams, Param{Value: path[i:]})
+			return da.node[da.bc[nextIdx].Base()], nextParams, true
+		}
+	}
+	return nil, nil, false
+}
+
+// build builds the double-array from records.
+func (da *doubleArray) build(srcs []*record, idx, depth int, usedBase map[int]struct{}) error {
+	sort.Stable(recordSlice(srcs))
+	base, siblings, leaf, err := da.arrange(srcs, idx, depth, usedBase)
+	if err != nil {
+		return err
+	}
+	if leaf != nil {
+		nd, err := makeNode(leaf)
+		if err != nil {
+			return err
+		}
+		da.bc[idx].SetBase(len(da.node))
+		da.node = append(da.node, nd)
+	}
+	for _, sib := range siblings {
+		da.setCheck(nextIndex(base, sib.c), sib.c)
+	}
+	for _, sib := range siblings {
+		records := srcs[sib.start:sib.end]
+		switch sib.c {
+		case ParamCharacter:
+			for _, r := range records {
+				next := NextSeparator(r.Key, depth+1)
+				name := r.Key[depth+1 : next]
+				r.paramNames = append(r.paramNames, name)
+				r.Key = r.Key[next:]
+			}
+			da.bc[idx].SetSingleParam()
+			if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil {
+				return err
+			}
+		case WildcardCharacter:
+			r := records[0]
+			name := r.Key[depth+1 : len(r.Key)-1]
+			r.paramNames = append(r.paramNames, name)
+			r.Key = ""
+			da.bc[idx].SetWildcardParam()
+			if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil {
+				return err
+			}
+		default:
+			if err := da.build(records, nextIndex(base, sib.c), depth+1, usedBase); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// setBase sets BASE.
+func (da *doubleArray) setBase(i, base int) {
+	da.bc[i].SetBase(base)
+}
+
+// setCheck sets CHECK.
+func (da *doubleArray) setCheck(i int, check byte) {
+	da.bc[i].SetCheck(check)
+}
+
+// findEmptyIndex returns the index of an unused BASE/CHECK node.
+func (da *doubleArray) findEmptyIndex(start int) int {
+	i := start
+	for ; i < len(da.bc); i++ {
+		if da.bc[i].IsEmpty() {
+			break
+		}
+	}
+	return i
+}
+
+// findBase returns a good BASE.
+func (da *doubleArray) findBase(siblings []sibling, start int, usedBase map[int]struct{}) (base int) {
+	for idx, firstChar := start+1, siblings[0].c; ; idx = da.findEmptyIndex(idx + 1) {
+		base = nextIndex(idx, firstChar)
+		if _, used := usedBase[base]; used {
+			continue
+		}
+		i := 0
+		for ; i < len(siblings); i++ {
+			next := nextIndex(base, siblings[i].c)
+			if len(da.bc) <= next {
+				da.bc = append(da.bc, make([]baseCheck, next-len(da.bc)+1)...)
+			}
+			if !da.bc[next].IsEmpty() {
+				break
+			}
+		}
+		if i == len(siblings) {
+			break
+		}
+	}
+	usedBase[base] = struct{}{}
+	return base
+}
+
+func (da *doubleArray) arrange(records []*record, idx, depth int, usedBase map[int]struct{}) (base int, siblings []sibling, leaf *record, err error) {
+	siblings, leaf, err = makeSiblings(records, depth)
+	if err != nil {
+		return -1, nil, nil, err
+	}
+	if len(siblings) < 1 {
+		return -1, nil, leaf, nil
+	}
+	base = da.findBase(siblings, idx, usedBase)
+	if base > MaxSize {
+		return -1, nil, nil, errors.New("denco: too many elements of internal slice")
+	}
+	da.setBase(idx, base)
+	return base, siblings, leaf, err
+}
+
+// node represents a node of the Double-Array.
+type node struct {
+	data any
+
+	// Names of path parameters.
+	paramNames []string
+}
+
+// makeNode returns a new node from a record.
+func makeNode(r *record) (*node, error) {
+	dups := make(map[string]bool)
+	for _, name := range r.paramNames {
+		if dups[name] {
+			return nil, fmt.Errorf("denco: path parameter `%v' is duplicated in the key `%v'", name, r.Key)
+		}
+		dups[name] = true
+	}
+	return &node{data: r.Value, paramNames: r.paramNames}, nil
+}
+
+// sibling represents intermediate data used when building the Double-Array.
+type sibling struct {
+	// An index of the start of duplicated characters.
+	start int
+
+	// An index of the end of duplicated characters.
+	end int
+
+	// A character of the sibling.
+	c byte
+}
+
+// nextIndex returns the next index of the BASE/CHECK array.
+func nextIndex(base int, c byte) int {
+	return base ^ int(c)
+}
+
+// makeSiblings returns a slice of sibling.
+func makeSiblings(records []*record, depth int) (sib []sibling, leaf *record, err error) {
+	var (
+		pc byte
+		n  int
+	)
+	for i, r := range records {
+		if len(r.Key) <= depth {
+			leaf = r
+			continue
+		}
+		c := r.Key[depth]
+		switch {
+		case pc < c:
+			sib = append(sib, sibling{start: i, c: c})
+		case pc == c:
+			continue
+		default:
+			return nil, nil, errors.New("denco: BUG: routing table hasn't been sorted")
+		}
+		if n > 0 {
+			sib[n-1].end = i
+		}
+		pc = c
+		n++
+	}
+	if n == 0 {
+		return nil, leaf, nil
+	}
+	sib[n-1].end = len(records)
+	return sib, leaf, nil
+}
+
+// Record represents record data for router construction.
+type Record struct {
+	// Key for router construction.
+	Key string
+
+	// Result value for Key.
+	Value any
+}
+
+// NewRecord returns a new Record.
+func NewRecord(key string, value any) Record {
+	return Record{
+		Key:   key,
+		Value: value,
+	}
+}
+
+// record represents a record used to build the Double-Array.
+type record struct {
+	Record
+
+	paramNames []string
+}
+
+// makeRecords returns the records used to build the Double-Arrays.
+func makeRecords(srcs []Record) (statics, params []*record) {
+	termChar := string(TerminationCharacter)
+	paramPrefix := string(SeparatorCharacter) + string(ParamCharacter)
+	wildcardPrefix := string(SeparatorCharacter) + string(WildcardCharacter)
+	restconfPrefix := string(PathParamCharacter) + string(ParamCharacter)
+	for _, r := range srcs {
+		if strings.Contains(r.Key, paramPrefix) || strings.Contains(r.Key, wildcardPrefix) || strings.Contains(r.Key, restconfPrefix) {
+			r.Key += termChar
+			params = append(params, &record{Record: r})
+		} else {
+			statics = append(statics, &record{Record: r})
+		}
+	}
+	return statics, params
+}
+
+// recordSlice represents a slice of records for sorting and implements sort.Interface.
+type recordSlice []*record
+
+// Len implements sort.Interface.Len.
+func (rs recordSlice) Len() int {
+	return len(rs)
+}
+
+// Less implements sort.Interface.Less.
+func (rs recordSlice) Less(i, j int) bool {
+	return rs[i].Key < rs[j].Key
+}
+
+// Swap implements sort.Interface.Swap.
+func (rs recordSlice) Swap(i, j int) {
+	rs[i], rs[j] = rs[j], rs[i]
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/server.go b/vendor/github.com/go-openapi/runtime/middleware/denco/server.go
new file mode 100644
index 00000000000..8f04d93dba9
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/denco/server.go
@@ -0,0 +1,109 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package denco
+
+import (
+	"net/http"
+)
+
+// Mux represents a multiplexer for HTTP requests.
+type Mux struct{}
+
+// NewMux returns a new Mux.
+func NewMux() *Mux {
+	return &Mux{}
+}
+
+// GET is shorthand for Mux.Handler("GET", path, handler).
+func (m *Mux) GET(path string, handler HandlerFunc) Handler {
+	return m.Handler("GET", path, handler)
+}
+
+// POST is shorthand for Mux.Handler("POST", path, handler).
+func (m *Mux) POST(path string, handler HandlerFunc) Handler {
+	return m.Handler("POST", path, handler)
+}
+
+// PUT is shorthand for Mux.Handler("PUT", path, handler).
+func (m *Mux) PUT(path string, handler HandlerFunc) Handler {
+	return m.Handler("PUT", path, handler)
+}
+
+// HEAD is shorthand for Mux.Handler("HEAD", path, handler).
+func (m *Mux) HEAD(path string, handler HandlerFunc) Handler {
+	return m.Handler("HEAD", path, handler)
+}
+
+// Handler returns a handler for an HTTP method.
+func (m *Mux) Handler(method, path string, handler HandlerFunc) Handler {
+	return Handler{
+		Method: method,
+		Path:   path,
+		Func:   handler,
+	}
+}
+
+// Build builds a http.Handler.
+func (m *Mux) Build(handlers []Handler) (http.Handler, error) {
+	recordMap := make(map[string][]Record)
+	for _, h := range handlers {
+		recordMap[h.Method] = append(recordMap[h.Method], NewRecord(h.Path, h.Func))
+	}
+	mux := newServeMux()
+	for m, records := range recordMap {
+		router := New()
+		if err := router.Build(records); err != nil {
+			return nil, err
+		}
+		mux.routers[m] = router
+	}
+	return mux, nil
+}
+
+// Handler represents a handler of an HTTP request.
+type Handler struct {
+	// Method is an HTTP method.
+	Method string
+
+	// Path is a routing path for the handler.
+	Path string
+
+	// Func is the handler function for the HTTP request.
+	Func HandlerFunc
+}
+
+// HandlerFunc is the type of a function that handles an HTTP request.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, params Params)
+
+type serveMux struct {
+	routers map[string]*Router
+}
+
+func newServeMux() *serveMux {
+	return &serveMux{
+		routers: make(map[string]*Router),
+	}
+}
+
+// ServeHTTP implements the http.Handler interface.
+func (mux *serveMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	handler, params := mux.handler(r.Method, r.URL.Path)
+	handler(w, r, params)
+}
+
+func (mux *serveMux) handler(method, path string) (HandlerFunc, []Param) {
+	if router, found := mux.routers[method]; found {
+		if handler, params, found := router.Lookup(path); found {
+			return handler.(HandlerFunc), params
+		}
+	}
+	return NotFound, nil
+}
+
+// NotFound replies to the request with an HTTP 404 not found error.
+// NotFound is called when the HTTP method is unknown or no handler is found.
+// To use your own NotFound handler, overwrite this variable.
+var NotFound = func(w http.ResponseWriter, r *http.Request, _ Params) {
+	http.NotFound(w, r)
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/util.go b/vendor/github.com/go-openapi/runtime/middleware/denco/util.go
new file mode 100644
index 00000000000..f002bc4693f
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/denco/util.go
@@ -0,0 +1,15 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package denco
+
+// NextSeparator returns the index of the next separator in path.
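+// For example, NextSeparator("/user/alice", 1) returns 5, the index of the
+// second '/'.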
+func NextSeparator(path string, start int) int {
+	for start < len(path) {
+		if c := path[start]; c == '/' || c == TerminationCharacter {
+			break
+		}
+		start++
+	}
+	return start
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/doc.go b/vendor/github.com/go-openapi/runtime/middleware/doc.go
new file mode 100644
index 00000000000..04b83223638
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/doc.go
@@ -0,0 +1,52 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package middleware provides the library with helper functions for serving swagger APIs.
+
+Pseudo middleware handler
+
+	import (
+		"net/http"
+
+		"github.com/go-openapi/errors"
+	)
+
+	func newCompleteMiddleware(ctx *Context) http.Handler {
+		return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+			// use context to lookup routes
+			if matched, ok := ctx.RouteInfo(r); ok {
+
+				if matched.NeedsAuth() {
+					if _, err := ctx.Authorize(r, matched); err != nil {
+						ctx.Respond(rw, r, matched.Produces, matched, err)
+						return
+					}
+				}
+
+				bound, validation := ctx.BindAndValidate(r, matched)
+				if validation != nil {
+					ctx.Respond(rw, r, matched.Produces, matched, validation)
+					return
+				}
+
+				result, err := matched.Handler.Handle(bound)
+				if err != nil {
+					ctx.Respond(rw, r, matched.Produces, matched, err)
+					return
+				}
+
+				ctx.Respond(rw, r, matched.Produces, matched, result)
+				return
+			}
+
+			// Not found, check if it exists in the other methods first
+			if others := ctx.AllowedMethods(r); len(others) > 0 {
+				ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others))
+				return
+			}
+			ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path))
+		})
+	}
+*/
+package middleware
diff --git a/vendor/github.com/go-openapi/runtime/middleware/header/header.go b/vendor/github.com/go-openapi/runtime/middleware/header/header.go
new file mode 100644
index 00000000000..6ce870d8936
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/header/header.go
@@ -0,0 +1,339 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// this file was taken from the github.com/golang/gddo repository
+
+// Package header provides functions for parsing HTTP headers.
+package header
+
+import (
+	"maps"
+	"net/http"
+	"strings"
+	"time"
+)
+
+// Octet types from RFC 2616.
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+const (
+	asciiMaxControlChar = 31
+	asciiMaxChar        = 127
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
| "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := range 256 { + var t octetType + isCtl := c <= asciiMaxControlChar || c == asciiMaxChar + isChar := 0 <= c && c <= asciiMaxChar + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// Copy returns a shallow copy of the header. +func Copy(header http.Header) http.Header { + h := make(http.Header) + maps.Copy(h, header) + return h +} + +var timeLayouts = []string{"Mon, 02 Jan 2006 15:04:05 GMT", time.RFC850, time.ANSIC} + +// ParseTime parses the header as time. The zero value is returned if the +// header is not present or there is an error parsing the +// header. +func ParseTime(header http.Header, key string) time.Time { + if s := header.Get(key); s != "" { + for _, layout := range timeLayouts { + if t, err := time.Parse(layout, s); err == nil { + return t.UTC() + } + } + } + return time.Time{} +} + +// ParseList parses a comma separated list of values. Commas are ignored in +// quoted strings. Quoted values are not unescaped or unquoted. Whitespace is +// trimmed. +func ParseList(header http.Header, key string) []string { + var result []string + for _, s := range header[http.CanonicalHeaderKey(key)] { + begin := 0 + end := 0 + escape := false + quote := false + for i := range len(s) { + b := s[i] + switch { + case escape: + escape = false + end = i + 1 + case quote: + switch b { + case '\\': + escape = true + case '"': + quote = false + } + end = i + 1 + case b == '"': + quote = true + end = i + 1 + case octetTypes[b]&isSpace != 0: + if begin == end { + begin = i + 1 + end = begin + } + case b == ',': + if begin < end { + result = append(result, s[begin:end]) + } + begin = i + 1 + end = begin + default: + end = i + 1 + } + } + if begin < end { + result = append(result, s[begin:end]) + } + } + return result +} + +// ParseValueAndParams parses a comma separated list of values with optional +// semicolon separated name-value pairs. Content-Type and Content-Disposition +// headers are in this format. +func ParseValueAndParams(header http.Header, key string) (string, map[string]string) { + return parseValueAndParams(header.Get(key)) +} + +func parseValueAndParams(s string) (value string, params map[string]string) { + params = make(map[string]string) + value, s = expectTokenSlash(s) + if value == "" { + return + } + value = strings.ToLower(value) + s = skipSpace(s) + for strings.HasPrefix(s, ";") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +// AcceptSpec ... +type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept2 ... +func ParseAccept2(header http.Header, key string) (specs []AcceptSpec) { + for _, en := range ParseList(header, key) { + v, p := parseValueAndParams(en) + var spec AcceptSpec + spec.Value = v + spec.Q = 1.0 + if p != nil { + if q, ok := p["q"]; ok { + spec.Q, _ = expectQuality(q) + } + } + if spec.Q < 0.0 { + continue + } + specs = append(specs, spec) + } + + return +} + +// ParseAccept parses Accept* headers. 
+func ParseAccept(header http.Header, key string) []AcceptSpec {
+	var specs []AcceptSpec
+loop:
+	for _, s := range header[key] {
+		for {
+			var spec AcceptSpec
+			spec.Value, s = expectTokenSlash(s)
+			if spec.Value == "" {
+				continue loop
+			}
+			spec.Q = 1.0
+			s = skipSpace(s)
+			if strings.HasPrefix(s, ";") {
+				s = skipSpace(s[1:])
+				for !strings.HasPrefix(s, "q=") && s != "" && !strings.HasPrefix(s, ",") {
+					s = skipSpace(s[1:])
+				}
+				if strings.HasPrefix(s, "q=") {
+					spec.Q, s = expectQuality(s[2:])
+					if spec.Q < 0.0 {
+						continue loop
+					}
+				}
+			}
+
+			specs = append(specs, spec)
+			s = skipSpace(s)
+			if !strings.HasPrefix(s, ",") {
+				continue loop
+			}
+			s = skipSpace(s[1:])
+		}
+	}
+
+	return specs
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenSlash(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		b := s[i]
+		if (octetTypes[b]&isToken == 0) && b != '/' {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectQuality(s string) (q float64, rest string) {
+	switch {
+	case len(s) == 0:
+		return -1, ""
+	case s[0] == '0':
+		// q is already 0
+		s = s[1:]
+	case s[0] == '1':
+		s = s[1:]
+		q = 1
+	case s[0] == '.':
+		// q is already 0
+	default:
+		return -1, ""
+	}
+	if !strings.HasPrefix(s, ".") {
+		return q, s
+	}
+	s = s[1:]
+	i := 0
+	n := 0
+	d := 1
+	for ; i < len(s); i++ {
+		b := s[i]
+		if b < '0' || b > '9' {
+			break
+		}
+		n = n*10 + int(b) - '0'
+		d *= 10
+	}
+	return q + float64(n)/float64(d), s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i++; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/negotiate.go b/vendor/github.com/go-openapi/runtime/middleware/negotiate.go
new file mode 100644
index 00000000000..cb0a85283c1
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/negotiate.go
@@ -0,0 +1,102 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// this file was taken from the github.com/golang/gddo repository
+
+package middleware
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/go-openapi/runtime/middleware/header"
+)
+
+// NegotiateContentEncoding returns the best offered content encoding for the
+// request's Accept-Encoding header. If two offers match with equal weight,
+// then the offer earlier in the list is preferred. If no offers are
+// acceptable, then "" is returned.
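+//
+// For example, with Accept-Encoding: "gzip;q=1.0, identity;q=0.5" and offers
+// ["gzip", "identity"], "gzip" is selected; with "identity;q=0" and offers
+// ["identity"], "" is returned, because the client forbids the identity encoding.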
+func NegotiateContentEncoding(r *http.Request, offers []string) string {
+	bestOffer := "identity"
+	bestQ := -1.0
+	specs := header.ParseAccept(r.Header, "Accept-Encoding")
+	for _, offer := range offers {
+		for _, spec := range specs {
+			if spec.Q > bestQ &&
+				(spec.Value == "*" || spec.Value == offer) {
+				bestQ = spec.Q
+				bestOffer = offer
+			}
+		}
+	}
+	if bestQ == 0 {
+		bestOffer = ""
+	}
+	return bestOffer
+}
+
+// NegotiateContentType returns the best offered content type for the request's
+// Accept header. If two offers match with equal weight, then the more specific
+// offer is preferred. For example, text/* trumps */*. If two offers match
+// with equal weight and specificity, then the offer earlier in the list is
+// preferred. If no offers match, then defaultOffer is returned.
+func NegotiateContentType(r *http.Request, offers []string, defaultOffer string) string {
+	bestOffer := defaultOffer
+	bestQ := -1.0
+	bestWild := 3
+	specs := header.ParseAccept(r.Header, "Accept")
+	for _, rawOffer := range offers {
+		offer := normalizeOffer(rawOffer)
+		// No Accept header: just return the first offer.
+		if len(specs) == 0 {
+			return rawOffer
+		}
+		for _, spec := range specs {
+			switch {
+			case spec.Q == 0.0:
+				// ignore
+			case spec.Q < bestQ:
+				// we already have a better match: skip
+			case spec.Value == "*/*":
+				if spec.Q > bestQ || bestWild > 2 {
+					bestQ = spec.Q
+					bestWild = 2
+					bestOffer = rawOffer
+				}
+			case strings.HasSuffix(spec.Value, "/*"):
+				if strings.HasPrefix(offer, spec.Value[:len(spec.Value)-1]) &&
+					(spec.Q > bestQ || bestWild > 1) {
+					bestQ = spec.Q
+					bestWild = 1
+					bestOffer = rawOffer
+				}
+			default:
+				if spec.Value == offer &&
+					(spec.Q > bestQ || bestWild > 0) {
+					bestQ = spec.Q
+					bestWild = 0
+					bestOffer = rawOffer
+				}
+			}
+		}
+	}
+	return bestOffer
+}
+
+func normalizeOffers(orig []string) (norm []string) {
+	for _, o := range orig {
+		norm = append(norm, normalizeOffer(o))
+	}
+	return
+}
+
+func normalizeOffer(orig string) string {
+	const maxParts = 2
+	return strings.SplitN(orig, ";", maxParts)[0]
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go b/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go
new file mode 100644
index 00000000000..2e63780c70b
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go
@@ -0,0 +1,56 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package middleware
+
+import (
+	"net/http"
+
+	"github.com/go-openapi/runtime"
+)
+
+type errorResp struct {
+	code     int
+	response any
+	headers  http.Header
+}
+
+func (e *errorResp) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
+	for k, v := range e.headers {
+		for _, val := range v {
+			rw.Header().Add(k, val)
+		}
+	}
+	if e.code > 0 {
+		rw.WriteHeader(e.code)
+	} else {
+		rw.WriteHeader(http.StatusInternalServerError)
+	}
+	if err := producer.Produce(rw, e.response); err != nil {
+		Logger.Printf("failed to write error response: %v", err)
+	}
+}
+
+// NotImplemented returns the error responder used when an operation is not implemented
+func NotImplemented(message string) Responder {
+	return Error(http.StatusNotImplemented, message)
+}
+
+// Error creates a generic responder for returning errors; the data will be serialized
+// with the matching producer for the request
+func Error(code int, data any, headers ...http.Header) Responder {
+	var hdr http.Header
+	for _, h := range headers {
+		for k, v := range h {
+			if hdr == nil {
+				hdr = make(http.Header)
+			}
+ hdr[k] = v + } + } + return &errorResp{ + code: code, + response: data, + headers: hdr, + } +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/operation.go b/vendor/github.com/go-openapi/runtime/middleware/operation.go new file mode 100644 index 00000000000..2a7ab1fadaf --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/operation.go @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import "net/http" + +// NewOperationExecutor creates a context aware middleware that handles the operations after routing +func NewOperationExecutor(ctx *Context) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + // use context to lookup routes + route, rCtx, _ := ctx.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + + route.Handler.ServeHTTP(rw, r) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/parameter.go b/vendor/github.com/go-openapi/runtime/middleware/parameter.go new file mode 100644 index 00000000000..7d630d6cce6 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/parameter.go @@ -0,0 +1,480 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/conv" + "github.com/go-openapi/swag/stringutils" + "github.com/go-openapi/validate" +) + +const defaultMaxMemory = 32 << 20 + +const ( + typeString = "string" + typeArray = "array" +) + +var textUnmarshalType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() + +func newUntypedParamBinder(param spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedParamBinder { + binder := new(untypedParamBinder) + binder.Name = param.Name + binder.parameter = ¶m + binder.formats = formats + if param.In != "body" { + binder.validator = validate.NewParamValidator(¶m, formats) + } else { + binder.validator = validate.NewSchemaValidator(param.Schema, spec, param.Name, formats) + } + + return binder +} + +type untypedParamBinder struct { + parameter *spec.Parameter + formats strfmt.Registry + Name string + validator validate.EntityValidator +} + +func (p *untypedParamBinder) Type() reflect.Type { + return p.typeForSchema(p.parameter.Type, p.parameter.Format, p.parameter.Items) +} + +func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, target reflect.Value) error { + // fmt.Println("binding", p.name, "as", p.Type()) + switch p.parameter.In { + case "query": + data, custom, hasKey, err := p.readValue(runtime.Values(request.URL.Query()), target) + if err != nil { + return err + } + if custom { + return nil + } + + return p.bindValue(data, hasKey, target) + + case "header": + data, custom, hasKey, err := p.readValue(runtime.Values(request.Header), target) + if err != nil { + return err + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + + case "path": + data, custom, hasKey, err := p.readValue(routeParams, target) + if err != nil { + return err + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + + case "formData": + var err error + var mt string + + mt, _, e := runtime.ContentType(request.Header) + if e != nil { + 
// because of the interface conversion go thinks the error is not nil + // so we first check for nil and then set the err var if it's not nil + err = e + } + + if err != nil { + return errors.InvalidContentType("", []string{"multipart/form-data", "application/x-www-form-urlencoded"}) + } + + if mt != "multipart/form-data" && mt != "application/x-www-form-urlencoded" { + return errors.InvalidContentType(mt, []string{"multipart/form-data", "application/x-www-form-urlencoded"}) + } + + if mt == "multipart/form-data" { + if err = request.ParseMultipartForm(defaultMaxMemory); err != nil { + return errors.NewParseError(p.Name, p.parameter.In, "", err) + } + } + + if err = request.ParseForm(); err != nil { + return errors.NewParseError(p.Name, p.parameter.In, "", err) + } + + if p.parameter.Type == "file" { + file, header, ffErr := request.FormFile(p.parameter.Name) + if ffErr != nil { + if p.parameter.Required { + return errors.NewParseError(p.Name, p.parameter.In, "", ffErr) + } + + return nil + } + + target.Set(reflect.ValueOf(runtime.File{Data: file, Header: header})) + return nil + } + + if request.MultipartForm != nil { + data, custom, hasKey, rvErr := p.readValue(runtime.Values(request.MultipartForm.Value), target) + if rvErr != nil { + return rvErr + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + } + data, custom, hasKey, err := p.readValue(runtime.Values(request.PostForm), target) + if err != nil { + return err + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + + case "body": + newValue := reflect.New(target.Type()) + if !runtime.HasBody(request) { + if p.parameter.Default != nil { + target.Set(reflect.ValueOf(p.parameter.Default)) + } + + return nil + } + if err := consumer.Consume(request.Body, newValue.Interface()); err != nil { + if err == io.EOF && p.parameter.Default != nil { + target.Set(reflect.ValueOf(p.parameter.Default)) + return nil + } + tpe := p.parameter.Type + if p.parameter.Format != "" { + tpe = p.parameter.Format + } + return errors.InvalidType(p.Name, p.parameter.In, tpe, nil) + } + target.Set(reflect.Indirect(newValue)) + return nil + default: + return fmt.Errorf("%d: invalid parameter location %q", http.StatusInternalServerError, p.parameter.In) + } +} + +func (p *untypedParamBinder) typeForSchema(tpe, format string, items *spec.Items) reflect.Type { + switch tpe { + case "boolean": + return reflect.TypeFor[bool]() + + case typeString: + if tt, ok := p.formats.GetType(format); ok { + return tt + } + return reflect.TypeFor[string]() + + case "integer": + switch format { + case "int8": + return reflect.TypeFor[int8]() + case "int16": + return reflect.TypeFor[int16]() + case "int32": + return reflect.TypeFor[int32]() + case "int64": + return reflect.TypeFor[int64]() + default: + return reflect.TypeFor[int64]() + } + + case "number": + switch format { + case "float": + return reflect.TypeFor[float32]() + case "double": + return reflect.TypeFor[float64]() + } + + case typeArray: + if items == nil { + return nil + } + itemsType := p.typeForSchema(items.Type, items.Format, items.Items) + if itemsType == nil { + return nil + } + return reflect.MakeSlice(reflect.SliceOf(itemsType), 0, 0).Type() + + case "file": + return reflect.TypeFor[runtime.File]() + + case "object": + return reflect.TypeFor[map[string]any]() + } + return nil +} + +func (p *untypedParamBinder) allowsMulti() bool { + return p.parameter.In == "query" || p.parameter.In == "formData" +} + +func (p *untypedParamBinder) readValue(values 
runtime.Gettable, target reflect.Value) ([]string, bool, bool, error) { + name, in, cf, tpe := p.parameter.Name, p.parameter.In, p.parameter.CollectionFormat, p.parameter.Type + if tpe == typeArray { + if cf == "multi" { + if !p.allowsMulti() { + return nil, false, false, errors.InvalidCollectionFormat(name, in, cf) + } + vv, hasKey, _ := values.GetOK(name) + return vv, false, hasKey, nil + } + + v, hk, hv := values.GetOK(name) + if !hv { + return nil, false, hk, nil + } + d, c, e := p.readFormattedSliceFieldValue(v[len(v)-1], target) + return d, c, hk, e + } + + vv, hk, _ := values.GetOK(name) + return vv, false, hk, nil +} + +func (p *untypedParamBinder) bindValue(data []string, hasKey bool, target reflect.Value) error { + if p.parameter.Type == typeArray { + return p.setSliceFieldValue(target, p.parameter.Default, data, hasKey) + } + var d string + if len(data) > 0 { + d = data[len(data)-1] + } + return p.setFieldValue(target, p.parameter.Default, d, hasKey) +} + +func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue any, data string, hasKey bool) error { //nolint:gocyclo + tpe := p.parameter.Type + if p.parameter.Format != "" { + tpe = p.parameter.Format + } + + if (!hasKey || (!p.parameter.AllowEmptyValue && data == "")) && p.parameter.Required && p.parameter.Default == nil { + return errors.Required(p.Name, p.parameter.In, data) + } + + ok, err := p.tryUnmarshaler(target, defaultValue, data) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if ok { + return nil + } + + defVal := reflect.Zero(target.Type()) + if defaultValue != nil { + defVal = reflect.ValueOf(defaultValue) + } + + if tpe == "byte" { + if data == "" { + if target.CanSet() { + target.SetBytes(defVal.Bytes()) + } + return nil + } + + b, err := base64.StdEncoding.DecodeString(data) + if err != nil { + b, err = base64.URLEncoding.DecodeString(data) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + } + if target.CanSet() { + target.SetBytes(b) + } + return nil + } + + switch target.Kind() { //nolint:exhaustive // we want to check only types that map from a swagger parameter + case reflect.Bool: + if data == "" { + if target.CanSet() { + target.SetBool(defVal.Bool()) + } + return nil + } + b, err := conv.ConvertBool(data) + if err != nil { + return err + } + if target.CanSet() { + target.SetBool(b) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if data == "" { + if target.CanSet() { + rd := defVal.Convert(reflect.TypeFor[int64]()) + target.SetInt(rd.Int()) + } + return nil + } + i, err := strconv.ParseInt(data, 10, 64) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.OverflowInt(i) { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.CanSet() { + target.SetInt(i) + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if data == "" { + if target.CanSet() { + rd := defVal.Convert(reflect.TypeFor[uint64]()) + target.SetUint(rd.Uint()) + } + return nil + } + u, err := strconv.ParseUint(data, 10, 64) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.OverflowUint(u) { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.CanSet() { + target.SetUint(u) + } + + case reflect.Float32, reflect.Float64: + if data == "" { + if target.CanSet() { + rd := defVal.Convert(reflect.TypeFor[float64]()) + 
target.SetFloat(rd.Float()) + } + return nil + } + f, err := strconv.ParseFloat(data, 64) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.OverflowFloat(f) { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.CanSet() { + target.SetFloat(f) + } + + case reflect.String: + value := data + if value == "" { + value = defVal.String() + } + // validate string + if target.CanSet() { + target.SetString(value) + } + + case reflect.Ptr: + if data == "" && defVal.Kind() == reflect.Ptr { + if target.CanSet() { + target.Set(defVal) + } + return nil + } + newVal := reflect.New(target.Type().Elem()) + if err := p.setFieldValue(reflect.Indirect(newVal), defVal, data, hasKey); err != nil { + return err + } + if target.CanSet() { + target.Set(newVal) + } + + default: + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + return nil +} + +func (p *untypedParamBinder) tryUnmarshaler(target reflect.Value, defaultValue any, data string) (bool, error) { + if !target.CanSet() { + return false, nil + } + // When a type implements encoding.TextUnmarshaler we'll use that instead of reflecting some more + if reflect.PointerTo(target.Type()).Implements(textUnmarshalType) { + if defaultValue != nil && len(data) == 0 { + target.Set(reflect.ValueOf(defaultValue)) + return true, nil + } + value := reflect.New(target.Type()) + if err := value.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(data)); err != nil { + return true, err + } + target.Set(reflect.Indirect(value)) + return true, nil + } + return false, nil +} + +func (p *untypedParamBinder) readFormattedSliceFieldValue(data string, target reflect.Value) ([]string, bool, error) { + ok, err := p.tryUnmarshaler(target, p.parameter.Default, data) + if err != nil { + return nil, true, err + } + if ok { + return nil, true, nil + } + + return stringutils.SplitByFormat(data, p.parameter.CollectionFormat), false, nil +} + +func (p *untypedParamBinder) setSliceFieldValue(target reflect.Value, defaultValue any, data []string, hasKey bool) error { + sz := len(data) + if (!hasKey || (!p.parameter.AllowEmptyValue && (sz == 0 || (sz == 1 && data[0] == "")))) && p.parameter.Required && defaultValue == nil { + return errors.Required(p.Name, p.parameter.In, data) + } + + defVal := reflect.Zero(target.Type()) + if defaultValue != nil { + defVal = reflect.ValueOf(defaultValue) + } + + if !target.CanSet() { + return nil + } + if sz == 0 { + target.Set(defVal) + return nil + } + + value := reflect.MakeSlice(reflect.SliceOf(target.Type().Elem()), sz, sz) + + for i := range sz { + if err := p.setFieldValue(value.Index(i), nil, data[i], hasKey); err != nil { + return err + } + } + + target.Set(value) + + return nil +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go b/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go new file mode 100644 index 00000000000..6039a26f33e --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go @@ -0,0 +1,83 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "bytes" + "fmt" + "html/template" + "net/http" + "path" +) + +// RapiDocOpts configures the RapiDoc middlewares +type RapiDocOpts struct { + // BasePath for the UI, defaults to: / + BasePath string + + // Path combines with BasePath to construct the path to the UI, defaults to: "docs". + Path string + + // SpecURL is the URL of the spec document. 
+ // + // Defaults to: /swagger.json + SpecURL string + + // Title for the documentation site, default to: API documentation + Title string + + // Template specifies a custom template to serve the UI + Template string + + // RapiDocURL points to the js asset that generates the rapidoc site. + // + // Defaults to https://unpkg.com/rapidoc/dist/rapidoc-min.js + RapiDocURL string +} + +func (r *RapiDocOpts) EnsureDefaults() { + common := toCommonUIOptions(r) + common.EnsureDefaults() + fromCommonToAnyOptions(common, r) + + // rapidoc-specifics + if r.RapiDocURL == "" { + r.RapiDocURL = rapidocLatest + } + if r.Template == "" { + r.Template = rapidocTemplate + } +} + +// RapiDoc creates a middleware to serve a documentation site for a swagger spec. +// +// This allows for altering the spec before starting the http listener. +func RapiDoc(opts RapiDocOpts, next http.Handler) http.Handler { + opts.EnsureDefaults() + + pth := path.Join(opts.BasePath, opts.Path) + tmpl := template.Must(template.New("rapidoc").Parse(opts.Template)) + assets := bytes.NewBuffer(nil) + if err := tmpl.Execute(assets, opts); err != nil { + panic(fmt.Errorf("cannot execute template: %w", err)) + } + + return serveUI(pth, assets.Bytes(), next) +} + +const ( + rapidocLatest = "https://unpkg.com/rapidoc/dist/rapidoc-min.js" + rapidocTemplate = ` + + + {{ .Title }} + + + + + + + +` +) diff --git a/vendor/github.com/go-openapi/runtime/middleware/redoc.go b/vendor/github.com/go-openapi/runtime/middleware/redoc.go new file mode 100644 index 00000000000..cbaec73c438 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/redoc.go @@ -0,0 +1,97 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "bytes" + "fmt" + "html/template" + "net/http" + "path" +) + +// RedocOpts configures the Redoc middlewares +type RedocOpts struct { + // BasePath for the UI, defaults to: / + BasePath string + + // Path combines with BasePath to construct the path to the UI, defaults to: "docs". + Path string + + // SpecURL is the URL of the spec document. + // + // Defaults to: /swagger.json + SpecURL string + + // Title for the documentation site, default to: API documentation + Title string + + // Template specifies a custom template to serve the UI + Template string + + // RedocURL points to the js that generates the redoc site. + // + // Defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js + RedocURL string +} + +// EnsureDefaults in case some options are missing +func (r *RedocOpts) EnsureDefaults() { + common := toCommonUIOptions(r) + common.EnsureDefaults() + fromCommonToAnyOptions(common, r) + + // redoc-specifics + if r.RedocURL == "" { + r.RedocURL = redocLatest + } + if r.Template == "" { + r.Template = redocTemplate + } +} + +// Redoc creates a middleware to serve a documentation site for a swagger spec. +// +// This allows for altering the spec before starting the http listener. 
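These UI middlewares all follow the same pattern: fill the options struct, let `EnsureDefaults` complete it, render the template once, and serve the result at `path.Join(BasePath, Path)`. A minimal sketch of mounting the RapiDoc handler defined above (the address and spec location are illustrative, not part of this change):

```go
package main

import (
	"log"
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

func main() {
	api := http.NewServeMux() // stand-in for the real API handler

	// Serve the RapiDoc UI at /docs; the spec itself is assumed to be
	// served elsewhere at /swagger.json (see the Spec middleware below).
	docs := middleware.RapiDoc(middleware.RapiDocOpts{
		BasePath: "/",
		Path:     "docs",
		SpecURL:  "/swagger.json",
		Title:    "Example API",
	}, api)

	log.Fatal(http.ListenAndServe(":8080", docs))
}
```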
+func Redoc(opts RedocOpts, next http.Handler) http.Handler { + opts.EnsureDefaults() + + pth := path.Join(opts.BasePath, opts.Path) + tmpl := template.Must(template.New("redoc").Parse(opts.Template)) + assets := bytes.NewBuffer(nil) + if err := tmpl.Execute(assets, opts); err != nil { + panic(fmt.Errorf("cannot execute template: %w", err)) + } + + return serveUI(pth, assets.Bytes(), next) +} + +const ( + redocLatest = "https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js" + redocTemplate = ` + + + {{ .Title }} + + + + + + + + + + + + + +` ) diff --git a/vendor/github.com/go-openapi/runtime/middleware/request.go b/vendor/github.com/go-openapi/runtime/middleware/request.go new file mode 100644 index 00000000000..52facfefcd2 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/request.go @@ -0,0 +1,106 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "net/http" + "reflect" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/logger" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +// UntypedRequestBinder binds and validates the data from an http request +type UntypedRequestBinder struct { + Spec *spec.Swagger + Parameters map[string]spec.Parameter + Formats strfmt.Registry + paramBinders map[string]*untypedParamBinder + debugLogf func(string, ...any) // a logging function to debug context and all components using it +} + +// NewUntypedRequestBinder creates a new binder for reading a request. +func NewUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *UntypedRequestBinder { + binders := make(map[string]*untypedParamBinder) + for fieldName, param := range parameters { + binders[fieldName] = newUntypedParamBinder(param, spec, formats) + } + return &UntypedRequestBinder{ + Parameters: parameters, + paramBinders: binders, + Spec: spec, + Formats: formats, + debugLogf: debugLogfFunc(nil), + } +} + +// Bind performs the data binding and validation +func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, data any) error { + val := reflect.Indirect(reflect.ValueOf(data)) + isMap := val.Kind() == reflect.Map + var result []error + o.debugLogf("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath()) + for fieldName, param := range o.Parameters { + binder := o.paramBinders[fieldName] + o.debugLogf("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath()) + var target reflect.Value + if !isMap { + binder.Name = fieldName + target = val.FieldByName(fieldName) + } + + if isMap { + tpe := binder.Type() + if tpe == nil { + if param.Schema.Type.Contains(typeArray) { + tpe = reflect.TypeFor[[]any]() + } else { + tpe = reflect.TypeFor[map[string]any]() + } + } + target = reflect.Indirect(reflect.New(tpe)) + } + + if !target.IsValid() { + result = append(result, errors.New(http.StatusInternalServerError, "parameter name %q is an unknown field", binder.Name)) + continue + } + + if err := binder.Bind(request, routeParams, consumer, target); err != nil { + result = append(result, err) + continue + } + + if binder.validator != nil { + rr := binder.validator.Validate(target.Interface()) + if rr != nil && rr.HasErrors() { + result = append(result, rr.AsError()) + } + } + + if isMap { + val.SetMapIndex(reflect.ValueOf(param.Name), target) + } + } + + if
len(result) > 0 { + return errors.CompositeValidationError(result...) + } + + return nil +} + +// SetLogger allows for injecting a logger to catch debug entries. +// +// The logger is enabled in DEBUG mode only. +func (o *UntypedRequestBinder) SetLogger(lg logger.Logger) { + o.debugLogf = debugLogfFunc(lg) +} + +func (o *UntypedRequestBinder) setDebugLogf(fn func(string, ...any)) { + o.debugLogf = fn +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/router.go b/vendor/github.com/go-openapi/runtime/middleware/router.go new file mode 100644 index 00000000000..16816580da8 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/router.go @@ -0,0 +1,520 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "fmt" + "net/http" + "net/url" + fpath "path" + "regexp" + "strings" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/logger" + "github.com/go-openapi/runtime/middleware/denco" + "github.com/go-openapi/runtime/security" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/stringutils" +) + +// RouteParam is an object to capture route params in a framework-agnostic way. +// Implementations of the muxer should use these route params to communicate with the +// swagger framework. +type RouteParam struct { + Name string + Value string +} + +// RouteParams is the collection of route params +type RouteParams []RouteParam + +// Get gets the value for the route param for the specified key +func (r RouteParams) Get(name string) string { + vv, _, _ := r.GetOK(name) + if len(vv) > 0 { + return vv[len(vv)-1] + } + return "" +} + +// GetOK gets the value but also returns booleans to indicate if a key or value +// is present.
This aids in validation and satisfies an interface in use there. + +// The returned values are: data, has key, has value +func (r RouteParams) GetOK(name string) ([]string, bool, bool) { + for _, p := range r { + if p.Name == name { + return []string{p.Value}, true, p.Value != "" + } + } + return nil, false, false +} + +// NewRouter creates a new context-aware router middleware +func NewRouter(ctx *Context, next http.Handler) http.Handler { + if ctx.router == nil { + ctx.router = DefaultRouter(ctx.spec, ctx.api, WithDefaultRouterLoggerFunc(ctx.debugLogf)) + } + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if _, rCtx, ok := ctx.RouteInfo(r); ok { + next.ServeHTTP(rw, rCtx) + return + } + + // Not found, check if it exists in the other methods first + if others := ctx.AllowedMethods(r); len(others) > 0 { + ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others)) + return + } + + ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.EscapedPath())) + }) +} + +// RoutableAPI represents an interface for things that can serve +// as a provider of implementations for the swagger router +type RoutableAPI interface { + HandlerFor(string, string) (http.Handler, bool) + ServeErrorFor(string) func(http.ResponseWriter, *http.Request, error) + ConsumersFor([]string) map[string]runtime.Consumer + ProducersFor([]string) map[string]runtime.Producer + AuthenticatorsFor(map[string]spec.SecurityScheme) map[string]runtime.Authenticator + Authorizer() runtime.Authorizer + Formats() strfmt.Registry + DefaultProduces() string + DefaultConsumes() string +} + +// Router represents a swagger-aware router +type Router interface { + Lookup(method, path string) (*MatchedRoute, bool) + OtherMethods(method, path string) []string +} + +type defaultRouteBuilder struct { + spec *loads.Document + analyzer *analysis.Spec + api RoutableAPI + records map[string][]denco.Record + debugLogf func(string, ...any) // a logging function to debug context and all components using it +} + +type defaultRouter struct { + spec *loads.Document + routers map[string]*denco.Router + debugLogf func(string, ...any) // a logging function to debug context and all components using it +} + +func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) *defaultRouteBuilder { + var o defaultRouterOpts + for _, apply := range opts { + apply(&o) + } + if o.debugLogf == nil { + o.debugLogf = debugLogfFunc(nil) // defaults to standard logger + } + + return &defaultRouteBuilder{ + spec: spec, + analyzer: analysis.New(spec.Spec()), + api: api, + records: make(map[string][]denco.Record), + debugLogf: o.debugLogf, + } +} + +// DefaultRouterOpt allows injecting optional behavior into the default router. +type DefaultRouterOpt func(*defaultRouterOpts) + +type defaultRouterOpts struct { + debugLogf func(string, ...any) +} + +// WithDefaultRouterLogger sets the debug logger for the default router. +// +// This is enabled only in DEBUG mode. +func WithDefaultRouterLogger(lg logger.Logger) DefaultRouterOpt { + return func(o *defaultRouterOpts) { + o.debugLogf = debugLogfFunc(lg) + } +} + +// WithDefaultRouterLoggerFunc sets a logging debug method for the default router.
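For illustration, a sketch of handing such a log function to the default router; `doc` and `api` are assumed to be supplied by the caller:

```go
package example

import (
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/runtime/middleware"
)

// buildRouter wires a custom debug sink into the default router.
// The output only shows up when go-openapi debug mode is enabled.
func buildRouter(doc *loads.Document, api middleware.RoutableAPI) middleware.Router {
	return middleware.DefaultRouter(doc, api,
		middleware.WithDefaultRouterLoggerFunc(func(format string, args ...any) {
			log.Printf("router: "+format, args...)
		}),
	)
}
```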
+func WithDefaultRouterLoggerFunc(fn func(string, ...any)) DefaultRouterOpt { + return func(o *defaultRouterOpts) { + o.debugLogf = fn + } +} + +// DefaultRouter creates a default implementation of the router +func DefaultRouter(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) Router { + builder := newDefaultRouteBuilder(spec, api, opts...) + if spec != nil { + for method, paths := range builder.analyzer.Operations() { + for path, operation := range paths { + fp := fpath.Join(spec.BasePath(), path) + builder.debugLogf("adding route %s %s %q", method, fp, operation.ID) + builder.AddRoute(method, fp, operation) + } + } + } + return builder.Build() +} + +// RouteAuthenticator is an authenticator that can compose several authenticators together. +// It also knows when it contains an authenticator that allows for anonymous pass through. +// Contains a group of 1 or more authenticators that have a logical AND relationship +type RouteAuthenticator struct { + Authenticator map[string]runtime.Authenticator + Schemes []string + Scopes map[string][]string + allScopes []string + commonScopes []string + allowAnonymous bool +} + +func (ra *RouteAuthenticator) AllowsAnonymous() bool { + return ra.allowAnonymous +} + +// AllScopes returns a list of unique scopes that is the combination +// of all the scopes in the requirements +func (ra *RouteAuthenticator) AllScopes() []string { + return ra.allScopes +} + +// CommonScopes returns a list of unique scopes that are common in all the +// scopes in the requirements +func (ra *RouteAuthenticator) CommonScopes() []string { + return ra.commonScopes +} + +// Authenticate Authenticator interface implementation +func (ra *RouteAuthenticator) Authenticate(req *http.Request, route *MatchedRoute) (bool, any, error) { + if ra.allowAnonymous { + route.Authenticator = ra + return true, nil, nil + } + // iterate in proper order + var lastResult any + for _, scheme := range ra.Schemes { + if authenticator, ok := ra.Authenticator[scheme]; ok { + applies, princ, err := authenticator.Authenticate(&security.ScopedAuthRequest{ + Request: req, + RequiredScopes: ra.Scopes[scheme], + }) + if !applies { + return false, nil, nil + } + if err != nil { + route.Authenticator = ra + return true, nil, err + } + lastResult = princ + } + } + route.Authenticator = ra + return true, lastResult, nil +} + +func stringSliceUnion(slices ...[]string) []string { + unique := make(map[string]struct{}) + var result []string + for _, slice := range slices { + for _, entry := range slice { + if _, ok := unique[entry]; ok { + continue + } + unique[entry] = struct{}{} + result = append(result, entry) + } + } + return result +} + +func stringSliceIntersection(slices ...[]string) []string { + unique := make(map[string]int) + var intersection []string + + total := len(slices) + var emptyCnt int + for _, slice := range slices { + if len(slice) == 0 { + emptyCnt++ + continue + } + + for _, entry := range slice { + unique[entry]++ + if unique[entry] == total-emptyCnt { // this entry appeared in all the non-empty slices + intersection = append(intersection, entry) + } + } + } + + return intersection +} + +// RouteAuthenticators represents a group of authenticators that represent a logical OR +type RouteAuthenticators []RouteAuthenticator + +// AllowsAnonymous returns true when there is an authenticator that means optional auth +func (ras RouteAuthenticators) AllowsAnonymous() bool { + for _, ra := range ras { + if ra.AllowsAnonymous() { + return true + } + } + return false +} + +// Authenticate 
method implementation so this collection can be used as an authenticator +func (ras RouteAuthenticators) Authenticate(req *http.Request, route *MatchedRoute) (bool, any, error) { + var lastError error + var allowsAnon bool + var anonAuth RouteAuthenticator + + for _, ra := range ras { + if ra.AllowsAnonymous() { + anonAuth = ra + allowsAnon = true + continue + } + applies, usr, err := ra.Authenticate(req, route) + if !applies || err != nil || usr == nil { + if err != nil { + lastError = err + } + continue + } + return applies, usr, nil + } + + if allowsAnon && lastError == nil { + route.Authenticator = &anonAuth + return true, nil, lastError + } + return lastError != nil, nil, lastError +} + +type routeEntry struct { + PathPattern string + BasePath string + Operation *spec.Operation + Consumes []string + Consumers map[string]runtime.Consumer + Produces []string + Producers map[string]runtime.Producer + Parameters map[string]spec.Parameter + Handler http.Handler + Formats strfmt.Registry + Binder *UntypedRequestBinder + Authenticators RouteAuthenticators + Authorizer runtime.Authorizer +} + +// MatchedRoute represents the route that was matched in this request +type MatchedRoute struct { + routeEntry + + Params RouteParams + Consumer runtime.Consumer + Producer runtime.Producer + Authenticator *RouteAuthenticator +} + +// HasAuth returns true when the route has a security requirement defined +func (m *MatchedRoute) HasAuth() bool { + return len(m.Authenticators) > 0 +} + +// NeedsAuth returns true when the request still +// needs to perform authentication +func (m *MatchedRoute) NeedsAuth() bool { + return m.HasAuth() && m.Authenticator == nil +} + +func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) { + mth := strings.ToUpper(method) + d.debugLogf("looking up route for %s %s", method, path) + if Debug { + if len(d.routers) == 0 { + d.debugLogf("there are no known routers") + } + for meth := range d.routers { + d.debugLogf("got a router for %s", meth) + } + } + if router, ok := d.routers[mth]; ok { + if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil { + if entry, ok := m.(*routeEntry); ok { + d.debugLogf("found a route for %s %s with %d parameters", method, path, len(entry.Parameters)) + var params RouteParams + for _, p := range rp { + v, err := url.PathUnescape(p.Value) + if err != nil { + d.debugLogf("failed to unescape %q: %v", p.Value, err) + v = p.Value + } + // a workaround to handle fragment/composing parameters until they are supported in denco router + // check if this parameter is a fragment within a path segment + const enclosureSize = 2 + if xpos := strings.Index(entry.PathPattern, fmt.Sprintf("{%s}", p.Name)) + len(p.Name) + enclosureSize; xpos < len(entry.PathPattern) && entry.PathPattern[xpos] != '/' { + // extract fragment parameters + ep := strings.Split(entry.PathPattern[xpos:], "/")[0] + pnames, pvalues := decodeCompositParams(p.Name, v, ep, nil, nil) + for i, pname := range pnames { + params = append(params, RouteParam{Name: pname, Value: pvalues[i]}) + } + } else { + // use the parameter directly + params = append(params, RouteParam{Name: p.Name, Value: v}) + } + } + return &MatchedRoute{routeEntry: *entry, Params: params}, true + } + } else { + d.debugLogf("couldn't find a route by path for %s %s", method, path) + } + } else { + d.debugLogf("couldn't find a route by method for %s %s", method, path) + } + return nil, false +} + +func (d *defaultRouter) OtherMethods(method, path string) []string { + mn := strings.ToUpper(method) + var
methods []string + for k, v := range d.routers { + if k != mn { + if _, _, ok := v.Lookup(fpath.Clean(path)); ok { + methods = append(methods, k) + continue + } + } + } + return methods +} + +func (d *defaultRouter) SetLogger(lg logger.Logger) { + d.debugLogf = debugLogfFunc(lg) +} + +// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco +var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`) + +func decodeCompositParams(name string, value string, pattern string, names []string, values []string) ([]string, []string) { + pleft := strings.Index(pattern, "{") + names = append(names, name) + if pleft < 0 { + if strings.HasSuffix(value, pattern) { + values = append(values, value[:len(value)-len(pattern)]) + } else { + values = append(values, "") + } + } else { + toskip := pattern[:pleft] + pright := strings.Index(pattern, "}") + vright := strings.Index(value, toskip) + if vright >= 0 { + values = append(values, value[:vright]) + } else { + values = append(values, "") + value = "" + } + return decodeCompositParams(pattern[pleft+1:pright], value[vright+len(toskip):], pattern[pright+1:], names, values) + } + return names, values +} + +func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Operation) { + mn := strings.ToUpper(method) + + bp := fpath.Clean(d.spec.BasePath()) + if len(bp) > 0 && bp[len(bp)-1] == '/' { + bp = bp[:len(bp)-1] + } + + d.debugLogf("operation: %#v", *operation) + if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok { + consumes := d.analyzer.ConsumesFor(operation) + produces := d.analyzer.ProducesFor(operation) + parameters := d.analyzer.ParamsFor(method, strings.TrimPrefix(path, bp)) + + // add API defaults if not part of the spec + if defConsumes := d.api.DefaultConsumes(); defConsumes != "" && !stringutils.ContainsStringsCI(consumes, defConsumes) { + consumes = append(consumes, defConsumes) + } + + if defProduces := d.api.DefaultProduces(); defProduces != "" && !stringutils.ContainsStringsCI(produces, defProduces) { + produces = append(produces, defProduces) + } + + requestBinder := NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()) + requestBinder.setDebugLogf(d.debugLogf) + record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{ + BasePath: bp, + PathPattern: path, + Operation: operation, + Handler: handler, + Consumes: consumes, + Produces: produces, + Consumers: d.api.ConsumersFor(normalizeOffers(consumes)), + Producers: d.api.ProducersFor(normalizeOffers(produces)), + Parameters: parameters, + Formats: d.api.Formats(), + Binder: requestBinder, + Authenticators: d.buildAuthenticators(operation), + Authorizer: d.api.Authorizer(), + }) + d.records[mn] = append(d.records[mn], record) + } +} + +func (d *defaultRouteBuilder) Build() *defaultRouter { + routers := make(map[string]*denco.Router) + for method, records := range d.records { + router := denco.New() + _ = router.Build(records) + routers[method] = router + } + return &defaultRouter{ + spec: d.spec, + routers: routers, + debugLogf: d.debugLogf, + } +} + +func (d *defaultRouteBuilder) buildAuthenticators(operation *spec.Operation) RouteAuthenticators { + requirements := d.analyzer.SecurityRequirementsFor(operation) + auths := make([]RouteAuthenticator, 0, len(requirements)) + for _, reqs := range requirements { + schemes := make([]string, 0, len(reqs)) + scopes := make(map[string][]string, len(reqs)) + scopeSlices := make([][]string, 0, len(reqs)) 
+ for _, req := range reqs { + schemes = append(schemes, req.Name) + scopes[req.Name] = req.Scopes + scopeSlices = append(scopeSlices, req.Scopes) + } + + definitions := d.analyzer.SecurityDefinitionsForRequirements(reqs) + authenticators := d.api.AuthenticatorsFor(definitions) + auths = append(auths, RouteAuthenticator{ + Authenticator: authenticators, + Schemes: schemes, + Scopes: scopes, + allScopes: stringSliceUnion(scopeSlices...), + commonScopes: stringSliceIntersection(scopeSlices...), + allowAnonymous: len(reqs) == 1 && reqs[0].Name == "", + }) + } + return auths +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/security.go b/vendor/github.com/go-openapi/runtime/middleware/security.go new file mode 100644 index 00000000000..37ecfa6fd4e --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/security.go @@ -0,0 +1,28 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import "net/http" + +func newSecureAPI(ctx *Context, next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := ctx.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + if route != nil && !route.NeedsAuth() { + next.ServeHTTP(rw, r) + return + } + + _, rCtx, err := ctx.Authorize(r, route) + if err != nil { + ctx.Respond(rw, r, route.Produces, route, err) + return + } + r = rCtx + + next.ServeHTTP(rw, r) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/spec.go b/vendor/github.com/go-openapi/runtime/middleware/spec.go new file mode 100644 index 00000000000..9cc9940aaa5 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/spec.go @@ -0,0 +1,91 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "net/http" + "path" +) + +const ( + contentTypeHeader = "Content-Type" + applicationJSON = "application/json" +) + +// SpecOption can be applied to the Spec serving middleware +type SpecOption func(*specOptions) + +var defaultSpecOptions = specOptions{ + Path: "", + Document: "swagger.json", +} + +type specOptions struct { + Path string + Document string +} + +func specOptionsWithDefaults(opts []SpecOption) specOptions { + o := defaultSpecOptions + for _, apply := range opts { + apply(&o) + } + + return o +} + +// Spec creates a middleware to serve a swagger spec as a JSON document. +// +// This allows for altering the spec before starting the http listener. +// +// The basePath argument indicates the path of the spec document (defaults to "/"). +// Additional SpecOption can be used to change the name of the document (defaults to "swagger.json"). +func Spec(basePath string, b []byte, next http.Handler, opts ...SpecOption) http.Handler { + if basePath == "" { + basePath = "/" + } + o := specOptionsWithDefaults(opts) + pth := path.Join(basePath, o.Path, o.Document) + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if path.Clean(r.URL.Path) == pth { + rw.Header().Set(contentTypeHeader, applicationJSON) + rw.WriteHeader(http.StatusOK) + _, _ = rw.Write(b) + + return + } + + if next != nil { + next.ServeHTTP(rw, r) + + return + } + + rw.Header().Set(contentTypeHeader, applicationJSON) + rw.WriteHeader(http.StatusNotFound) + }) +} + +// WithSpecPath sets the path to be joined to the base path of the Spec middleware. +// +// This is empty by default. 
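A short usage sketch for the Spec middleware above; the base path and document name are illustrative:

```go
package example

import (
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

// serveSpec exposes rawSpec (assumed to hold the marshaled swagger
// document) at /api/v1/openapi.json and forwards everything else.
func serveSpec(rawSpec []byte, next http.Handler) http.Handler {
	return middleware.Spec("/api/v1", rawSpec, next,
		middleware.WithSpecDocument("openapi.json"),
	)
}
```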
+func WithSpecPath(pth string) SpecOption { + return func(o *specOptions) { + o.Path = pth + } +} + +// WithSpecDocument sets the name of the JSON document served as a spec. +// +// By default, this is "swagger.json" +func WithSpecDocument(doc string) SpecOption { + return func(o *specOptions) { + if doc == "" { + return + } + + o.Document = doc + } +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go new file mode 100644 index 00000000000..b25a3a2cff7 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go @@ -0,0 +1,178 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "bytes" + "fmt" + "html/template" + "net/http" + "path" +) + +// SwaggerUIOpts configures the SwaggerUI middleware +type SwaggerUIOpts struct { + // BasePath for the API, defaults to: / + BasePath string + + // Path combines with BasePath to construct the path to the UI, defaults to: "docs". + Path string + + // SpecURL is the URL of the spec document. + // + // Defaults to: /swagger.json + SpecURL string + + // Title for the documentation site, default to: API documentation + Title string + + // Template specifies a custom template to serve the UI + Template string + + // OAuthCallbackURL the url called after OAuth2 login + OAuthCallbackURL string + + // The three components needed to embed swagger-ui + + // SwaggerURL points to the js that generates the SwaggerUI site. + // + // Defaults to: https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js + SwaggerURL string + + SwaggerPresetURL string + SwaggerStylesURL string + + Favicon32 string + Favicon16 string +} + +// EnsureDefaults in case some options are missing +func (r *SwaggerUIOpts) EnsureDefaults() { + r.ensureDefaults() + + if r.Template == "" { + r.Template = swaggeruiTemplate + } +} + +func (r *SwaggerUIOpts) EnsureDefaultsOauth2() { + r.ensureDefaults() + + if r.Template == "" { + r.Template = swaggerOAuthTemplate + } +} + +func (r *SwaggerUIOpts) ensureDefaults() { + common := toCommonUIOptions(r) + common.EnsureDefaults() + fromCommonToAnyOptions(common, r) + + // swaggerui-specifics + if r.OAuthCallbackURL == "" { + r.OAuthCallbackURL = path.Join(r.BasePath, r.Path, "oauth2-callback") + } + if r.SwaggerURL == "" { + r.SwaggerURL = swaggerLatest + } + if r.SwaggerPresetURL == "" { + r.SwaggerPresetURL = swaggerPresetLatest + } + if r.SwaggerStylesURL == "" { + r.SwaggerStylesURL = swaggerStylesLatest + } + if r.Favicon16 == "" { + r.Favicon16 = swaggerFavicon16Latest + } + if r.Favicon32 == "" { + r.Favicon32 = swaggerFavicon32Latest + } +} + +// SwaggerUI creates a middleware to serve a documentation site for a swagger spec. +// +// This allows for altering the spec before starting the http listener. 
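Because `SwaggerUIOpts` defaults `SpecURL` to `/swagger.json`, the UI middleware is typically chained with the Spec middleware so both agree on that location. A hedged sketch:

```go
package example

import (
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

// docsStack serves the UI at /docs and the spec at /swagger.json,
// delegating all remaining requests to the api handler.
func docsStack(rawSpec []byte, api http.Handler) http.Handler {
	ui := middleware.SwaggerUI(middleware.SwaggerUIOpts{
		Path:    "docs",
		SpecURL: "/swagger.json",
	}, api)
	return middleware.Spec("/", rawSpec, ui)
}
```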
+func SwaggerUI(opts SwaggerUIOpts, next http.Handler) http.Handler { + opts.EnsureDefaults() + + pth := path.Join(opts.BasePath, opts.Path) + tmpl := template.Must(template.New("swaggerui").Parse(opts.Template)) + assets := bytes.NewBuffer(nil) + if err := tmpl.Execute(assets, opts); err != nil { + panic(fmt.Errorf("cannot execute template: %w", err)) + } + + return serveUI(pth, assets.Bytes(), next) +} + +const ( + swaggerLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js" + swaggerPresetLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-standalone-preset.js" + swaggerStylesLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui.css" + swaggerFavicon32Latest = "https://unpkg.com/swagger-ui-dist/favicon-32x32.png" + swaggerFavicon16Latest = "https://unpkg.com/swagger-ui-dist/favicon-16x16.png" + swaggeruiTemplate = ` + + + + + {{ .Title }} + + + + + + + + +
+ + + + + + +` ) diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go new file mode 100644 index 00000000000..879bdbaadea --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go @@ -0,0 +1,108 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "bytes" + "fmt" + "net/http" + "text/template" +) + +func SwaggerUIOAuth2Callback(opts SwaggerUIOpts, next http.Handler) http.Handler { + opts.EnsureDefaultsOauth2() + + pth := opts.OAuthCallbackURL + tmpl := template.Must(template.New("swaggeroauth").Parse(opts.Template)) + assets := bytes.NewBuffer(nil) + if err := tmpl.Execute(assets, opts); err != nil { + panic(fmt.Errorf("cannot execute template: %w", err)) + } + + return serveUI(pth, assets.Bytes(), next) +} + +const ( + swaggerOAuthTemplate = ` + + + + {{ .Title }} + + + + + +` ) diff --git a/vendor/github.com/go-openapi/runtime/middleware/ui_options.go b/vendor/github.com/go-openapi/runtime/middleware/ui_options.go new file mode 100644 index 00000000000..cf2f673d3cb --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/ui_options.go @@ -0,0 +1,176 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "bytes" + "encoding/gob" + "fmt" + "net/http" + "path" + "strings" +) + +const ( + // constants that are common to all UI-serving middlewares + defaultDocsPath = "docs" + defaultDocsURL = "/swagger.json" + defaultDocsTitle = "API Documentation" +) + +// uiOptions defines common options for UI serving middlewares. +type uiOptions struct { + // BasePath for the UI, defaults to: / + BasePath string + + // Path combines with BasePath to construct the path to the UI, defaults to: "docs". + Path string + + // SpecURL is the URL of the spec document. + // + // Defaults to: /swagger.json + SpecURL string + + // Title for the documentation site, defaults to: API documentation + Title string + + // Template specifies a custom template to serve the UI + Template string +} + +// toCommonUIOptions converts any UI option type to retain the common options. +// +// This uses gob encoding/decoding to convert common fields from one struct to another. +func toCommonUIOptions(opts any) uiOptions { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + dec := gob.NewDecoder(&buf) + var o uiOptions + err := enc.Encode(opts) + if err != nil { + panic(err) + } + + err = dec.Decode(&o) + if err != nil { + panic(err) + } + + return o +} + +func fromCommonToAnyOptions[T any](source uiOptions, target *T) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + dec := gob.NewDecoder(&buf) + err := enc.Encode(source) + if err != nil { + panic(err) + } + + err = dec.Decode(target) + if err != nil { + panic(err) + } +} + +// UIOption can be applied to UI serving middleware, such as Context.APIHandler or +// Context.APIHandlerSwaggerUI, to alter the default behavior. +type UIOption func(*uiOptions) + +func uiOptionsWithDefaults(opts []UIOption) uiOptions { + var o uiOptions + for _, apply := range opts { + apply(&o) + } + + return o +} + +// WithUIBasePath sets the base path from where to serve the UI assets. +// +// By default, Context middleware sets this value to the API base path.
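The gob round trip above is what lets every UI options struct share a single defaulting routine: gob matches struct fields by name, so fields that exist on only one side are silently dropped. A standalone illustration of that property, with hypothetical types:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// Two hypothetical option structs sharing only the Title field.
type redocLike struct{ Title, RedocURL string }
type commonLike struct{ Title string }

func main() {
	var buf bytes.Buffer
	src := redocLike{Title: "API docs", RedocURL: "https://example.com/redoc.js"}

	// RedocURL has no counterpart in commonLike, so only Title survives.
	if err := gob.NewEncoder(&buf).Encode(src); err != nil {
		panic(err)
	}
	var dst commonLike
	if err := gob.NewDecoder(&buf).Decode(&dst); err != nil {
		panic(err)
	}

	fmt.Printf("%+v\n", dst) // prints: {Title:API docs}
}
```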
+func WithUIBasePath(base string) UIOption { + return func(o *uiOptions) { + if !strings.HasPrefix(base, "/") { + base = "/" + base + } + o.BasePath = base + } +} + +// WithUIPath sets the path from where to serve the UI assets (i.e. /{basepath}/{path}. +func WithUIPath(pth string) UIOption { + return func(o *uiOptions) { + o.Path = pth + } +} + +// WithUISpecURL sets the path from where to serve swagger spec document. +// +// This may be specified as a full URL or a path. +// +// By default, this is "/swagger.json" +func WithUISpecURL(specURL string) UIOption { + return func(o *uiOptions) { + o.SpecURL = specURL + } +} + +// WithUITitle sets the title of the UI. +// +// By default, Context middleware sets this value to the title found in the API spec. +func WithUITitle(title string) UIOption { + return func(o *uiOptions) { + o.Title = title + } +} + +// WithTemplate allows to set a custom template for the UI. +// +// UI middleware will panic if the template does not parse or execute properly. +func WithTemplate(tpl string) UIOption { + return func(o *uiOptions) { + o.Template = tpl + } +} + +// EnsureDefaults in case some options are missing +func (r *uiOptions) EnsureDefaults() { + if r.BasePath == "" { + r.BasePath = "/" + } + if r.Path == "" { + r.Path = defaultDocsPath + } + if r.SpecURL == "" { + r.SpecURL = defaultDocsURL + } + if r.Title == "" { + r.Title = defaultDocsTitle + } +} + +// serveUI creates a middleware that serves a templated asset as text/html. +func serveUI(pth string, assets []byte, next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if path.Clean(r.URL.Path) == pth { + rw.Header().Set(contentTypeHeader, "text/html; charset=utf-8") + rw.WriteHeader(http.StatusOK) + _, _ = rw.Write(assets) + + return + } + + if next != nil { + next.ServeHTTP(rw, r) + + return + } + + rw.Header().Set(contentTypeHeader, "text/plain") + rw.WriteHeader(http.StatusNotFound) + _, _ = fmt.Fprintf(rw, "%q not found", pth) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go new file mode 100644 index 00000000000..774da0ba0c8 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go @@ -0,0 +1,282 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package untyped + +import ( + "fmt" + "net/http" + "sort" + "strings" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" +) + +const ( + smallPreallocatedSlots = 10 + mediumPreallocatedSlots = 30 +) + +// API represents an untyped mux for a swagger spec +type API struct { + spec *loads.Document + analyzer *analysis.Spec + DefaultProduces string + DefaultConsumes string + consumers map[string]runtime.Consumer + producers map[string]runtime.Producer + authenticators map[string]runtime.Authenticator + authorizer runtime.Authorizer + operations map[string]map[string]runtime.OperationHandler + ServeError func(http.ResponseWriter, *http.Request, error) + Models map[string]func() any + formats strfmt.Registry +} + +// NewAPI creates the default untyped API +func NewAPI(spec *loads.Document) *API { + var an *analysis.Spec + if spec != nil && spec.Spec() != nil { + an = analysis.New(spec.Spec()) + } + api := &API{ + spec: spec, + analyzer: an, + consumers: 
make(map[string]runtime.Consumer, smallPreallocatedSlots), + producers: make(map[string]runtime.Producer, smallPreallocatedSlots), + authenticators: make(map[string]runtime.Authenticator), + operations: make(map[string]map[string]runtime.OperationHandler), + ServeError: errors.ServeError, + Models: make(map[string]func() any), + formats: strfmt.NewFormats(), + } + + return api.WithJSONDefaults() +} + +// WithJSONDefaults loads the json defaults for this api +func (d *API) WithJSONDefaults() *API { + d.DefaultConsumes = runtime.JSONMime + d.DefaultProduces = runtime.JSONMime + d.consumers[runtime.JSONMime] = runtime.JSONConsumer() + d.producers[runtime.JSONMime] = runtime.JSONProducer() + return d +} + +// WithoutJSONDefaults clears the json defaults for this api +func (d *API) WithoutJSONDefaults() *API { + d.DefaultConsumes = "" + d.DefaultProduces = "" + delete(d.consumers, runtime.JSONMime) + delete(d.producers, runtime.JSONMime) + return d +} + +// Formats returns the registered string formats +func (d *API) Formats() strfmt.Registry { + if d.formats == nil { + d.formats = strfmt.NewFormats() + } + return d.formats +} + +// RegisterFormat registers a custom format validator +func (d *API) RegisterFormat(name string, format strfmt.Format, validator strfmt.Validator) { + if d.formats == nil { + d.formats = strfmt.NewFormats() + } + d.formats.Add(name, format, validator) +} + +// RegisterAuth registers an auth handler in this api +func (d *API) RegisterAuth(scheme string, handler runtime.Authenticator) { + if d.authenticators == nil { + d.authenticators = make(map[string]runtime.Authenticator) + } + d.authenticators[scheme] = handler +} + +// RegisterAuthorizer registers an authorizer handler in this api +func (d *API) RegisterAuthorizer(handler runtime.Authorizer) { + d.authorizer = handler +} + +// RegisterConsumer registers a consumer for a media type. 
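As a usage sketch, an untyped API extended beyond its JSON defaults with the plain-text consumer/producer pair that this same vendored module provides; `doc` is assumed to be a loaded spec document:

```go
package example

import (
	"github.com/go-openapi/loads"
	"github.com/go-openapi/runtime"
	"github.com/go-openapi/runtime/middleware/untyped"
)

// newTextAPI builds an untyped API that accepts and renders text/plain
// in addition to the JSON defaults registered by NewAPI.
func newTextAPI(doc *loads.Document) *untyped.API {
	api := untyped.NewAPI(doc)
	api.RegisterConsumer("text/plain", runtime.TextConsumer())
	api.RegisterProducer("text/plain", runtime.TextProducer())
	return api
}
```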
+func (d *API) RegisterConsumer(mediaType string, handler runtime.Consumer) { + if d.consumers == nil { + d.consumers = make(map[string]runtime.Consumer, smallPreallocatedSlots) + } + d.consumers[strings.ToLower(mediaType)] = handler +} + +// RegisterProducer registers a producer for a media type +func (d *API) RegisterProducer(mediaType string, handler runtime.Producer) { + if d.producers == nil { + d.producers = make(map[string]runtime.Producer, smallPreallocatedSlots) + } + d.producers[strings.ToLower(mediaType)] = handler +} + +// RegisterOperation registers an operation handler for an operation name +func (d *API) RegisterOperation(method, path string, handler runtime.OperationHandler) { + if d.operations == nil { + d.operations = make(map[string]map[string]runtime.OperationHandler, mediumPreallocatedSlots) + } + um := strings.ToUpper(method) + if b, ok := d.operations[um]; !ok || b == nil { + d.operations[um] = make(map[string]runtime.OperationHandler) + } + d.operations[um][path] = handler +} + +// OperationHandlerFor returns the operation handler for the specified id if it can be found +func (d *API) OperationHandlerFor(method, path string) (runtime.OperationHandler, bool) { + if d.operations == nil { + return nil, false + } + if pi, ok := d.operations[strings.ToUpper(method)]; ok { + h, ok := pi[path] + return h, ok + } + return nil, false +} + +// ConsumersFor gets the consumers for the specified media types +func (d *API) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { + result := make(map[string]runtime.Consumer) + for _, mt := range mediaTypes { + if consumer, ok := d.consumers[mt]; ok { + result[mt] = consumer + } + } + return result +} + +// ProducersFor gets the producers for the specified media types +func (d *API) ProducersFor(mediaTypes []string) map[string]runtime.Producer { + result := make(map[string]runtime.Producer) + for _, mt := range mediaTypes { + if producer, ok := d.producers[mt]; ok { + result[mt] = producer + } + } + return result +} + +// AuthenticatorsFor gets the authenticators for the specified security schemes +func (d *API) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { + result := make(map[string]runtime.Authenticator) + for k := range schemes { + if a, ok := d.authenticators[k]; ok { + result[k] = a + } + } + return result +} + +// Authorizer returns the registered authorizer +func (d *API) Authorizer() runtime.Authorizer { + return d.authorizer +} + +// Validate validates this API for any missing items +func (d *API) Validate() error { + return d.validate() +} + +// validateWith validates the registrations in this API against the provided spec analyzer +func (d *API) validate() error { + consumes := make([]string, 0, len(d.consumers)) + for k := range d.consumers { + consumes = append(consumes, k) + } + + produces := make([]string, 0, len(d.producers)) + for k := range d.producers { + produces = append(produces, k) + } + + authenticators := make([]string, 0, len(d.authenticators)) + for k := range d.authenticators { + authenticators = append(authenticators, k) + } + + operations := make([]string, 0, len(d.operations)) + for m, v := range d.operations { + for p := range v { + operations = append(operations, fmt.Sprintf("%s %s", strings.ToUpper(m), p)) + } + } + + secDefinitions := d.spec.Spec().SecurityDefinitions + definedAuths := make([]string, 0, len(secDefinitions)) + for k := range secDefinitions { + definedAuths = append(definedAuths, k) + } + + if err := d.verify("consumes", 
consumes, d.analyzer.RequiredConsumes()); err != nil { + return err + } + if err := d.verify("produces", produces, d.analyzer.RequiredProduces()); err != nil { + return err + } + if err := d.verify("operation", operations, d.analyzer.OperationMethodPaths()); err != nil { + return err + } + + requiredAuths := d.analyzer.RequiredSecuritySchemes() + if err := d.verify("auth scheme", authenticators, requiredAuths); err != nil { + return err + } + if err := d.verify("security definitions", definedAuths, requiredAuths); err != nil { + return err + } + return nil +} + +func (d *API) verify(name string, registrations []string, expectations []string) error { + sort.Strings(registrations) + sort.Strings(expectations) + + expected := map[string]struct{}{} + seen := map[string]struct{}{} + + for _, v := range expectations { + expected[v] = struct{}{} + } + + var unspecified []string + for _, v := range registrations { + seen[v] = struct{}{} + if _, ok := expected[v]; !ok { + unspecified = append(unspecified, v) + } + } + + for k := range seen { + delete(expected, k) + } + + unregistered := make([]string, 0, len(expected)) + for k := range expected { + unregistered = append(unregistered, k) + } + sort.Strings(unspecified) + sort.Strings(unregistered) + + if len(unregistered) > 0 || len(unspecified) > 0 { + return &errors.APIVerificationFailed{ + Section: name, + MissingSpecification: unspecified, + MissingRegistration: unregistered, + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/validation.go b/vendor/github.com/go-openapi/runtime/middleware/validation.go new file mode 100644 index 00000000000..ed026d626ba --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/validation.go @@ -0,0 +1,118 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package middleware + +import ( + "mime" + "net/http" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/swag/stringutils" +) + +type validation struct { + context *Context + result []error + request *http.Request + route *MatchedRoute + bound map[string]any +} + +// ContentType validates the content type of a request +func validateContentType(allowed []string, actual string) error { + if len(allowed) == 0 { + return nil + } + mt, _, err := mime.ParseMediaType(actual) + if err != nil { + return errors.InvalidContentType(actual, allowed) + } + if stringutils.ContainsStringsCI(allowed, mt) { + return nil + } + if stringutils.ContainsStringsCI(allowed, "*/*") { + return nil + } + parts := strings.Split(actual, "/") + if len(parts) == 2 && stringutils.ContainsStringsCI(allowed, parts[0]+"/*") { + return nil + } + return errors.InvalidContentType(actual, allowed) +} + +func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation { + validate := &validation{ + context: ctx, + request: request, + route: route, + bound: make(map[string]any), + } + validate.debugLogf("validating request %s %s", request.Method, request.URL.EscapedPath()) + + validate.contentType() + if len(validate.result) == 0 { + validate.responseFormat() + } + if len(validate.result) == 0 { + validate.parameters() + } + + return validate +} + +func (v *validation) debugLogf(format string, args ...any) { + v.context.debugLogf(format, args...) 
+} + +func (v *validation) parameters() { + v.debugLogf("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath()) + if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil { + if result.Error() == "validation failure list" { + for _, e := range result.(*errors.Validation).Value.([]any) { + v.result = append(v.result, e.(error)) + } + return + } + v.result = append(v.result, result) + } +} + +func (v *validation) contentType() { + if len(v.result) == 0 && runtime.HasBody(v.request) { + v.debugLogf("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath()) + ct, _, req, err := v.context.ContentType(v.request) + if err != nil { + v.result = append(v.result, err) + } else { + v.request = req + } + + if len(v.result) == 0 { + v.debugLogf("validating content type for %q against [%s]", ct, strings.Join(v.route.Consumes, ", ")) + if err := validateContentType(v.route.Consumes, ct); err != nil { + v.result = append(v.result, err) + } + } + if ct != "" && v.route.Consumer == nil { + cons, ok := v.route.Consumers[ct] + if !ok { + v.result = append(v.result, errors.New(http.StatusInternalServerError, "no consumer registered for %s", ct)) + } else { + v.route.Consumer = cons + } + } + } +} + +func (v *validation) responseFormat() { + // if the route provides values for Produces and no format could be identified then return an error. + // if the route does not specify values for Produces then treat request as valid since the API designer + // chose not to specify the format for responses. + if str, rCtx := v.context.ResponseFormat(v.request, v.route.Produces); str == "" && len(v.route.Produces) > 0 { + v.request = rCtx + v.result = append(v.result, errors.InvalidResponseFormat(v.request.Header.Get(runtime.HeaderAccept), v.route.Produces)) + } +} diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go new file mode 100644 index 00000000000..aab7b8c055a --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/request.go @@ -0,0 +1,138 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "bufio" + "context" + "errors" + "io" + "net/http" + "strings" + + "github.com/go-openapi/swag/stringutils" +) + +// CanHaveBody returns true if this method can have a body +func CanHaveBody(method string) bool { + mn := strings.ToUpper(method) + return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE" +} + +// IsSafe returns true if this is a request with a safe method +func IsSafe(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn == "GET" || mn == "HEAD" +} + +// AllowsBody returns true if the request allows for a body +func AllowsBody(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn != "HEAD" +} + +// HasBody returns true if this request has a body +func HasBody(r *http.Request) bool { + // happy case: we have a content length set + if r.ContentLength > 0 { + return true + } + + if r.Header.Get("Content-Length") != "" { + // in this case, no Transfer-Encoding should be present + // we have a header set but it was explicitly set to 0, so we assume no body + return false + } + + rdr := newPeekingReader(r.Body) + r.Body = rdr + return rdr.HasContent() +} + +func newPeekingReader(r io.ReadCloser) *peekingReader { + if r == nil { + return nil + } + return &peekingReader{ + underlying: bufio.NewReader(r), + orig:
r, + } +} + +type peekingReader struct { + underlying interface { + Buffered() int + Peek(int) ([]byte, error) + Read([]byte) (int, error) + } + orig io.ReadCloser +} + +func (p *peekingReader) HasContent() bool { + if p == nil { + return false + } + if p.underlying.Buffered() > 0 { + return true + } + b, err := p.underlying.Peek(1) + if err != nil { + return false + } + return len(b) > 0 +} + +func (p *peekingReader) Read(d []byte) (int, error) { + if p == nil { + return 0, io.EOF + } + if p.underlying == nil { + return 0, io.ErrUnexpectedEOF + } + return p.underlying.Read(d) +} + +func (p *peekingReader) Close() error { + if p.underlying == nil { + return errors.New("reader already closed") + } + p.underlying = nil + if p.orig != nil { + return p.orig.Close() + } + return nil +} + +// JSONRequest creates a new http request with json headers set. +// +// It uses context.Background. +func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { + req, err := http.NewRequestWithContext(context.Background(), method, urlStr, body) + if err != nil { + return nil, err + } + req.Header.Add(HeaderContentType, JSONMime) + req.Header.Add(HeaderAccept, JSONMime) + return req, nil +} + +// Gettable for things with a method GetOK(string) (data string, hasKey bool, hasValue bool) +type Gettable interface { + GetOK(string) ([]string, bool, bool) +} + +// ReadSingleValue reads a single value from the source +func ReadSingleValue(values Gettable, name string) string { + vv, _, hv := values.GetOK(name) + if hv { + return vv[len(vv)-1] + } + return "" +} + +// ReadCollectionValue reads a collection value from a string data source +func ReadCollectionValue(values Gettable, name, collectionFormat string) []string { + v := ReadSingleValue(values, name) + return stringutils.SplitByFormat(v, collectionFormat) +} diff --git a/vendor/github.com/go-openapi/runtime/security/authenticator.go b/vendor/github.com/go-openapi/runtime/security/authenticator.go new file mode 100644 index 00000000000..b5b7904dc1e --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/security/authenticator.go @@ -0,0 +1,266 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package security + +import ( + "context" + "net/http" + "strings" + + "github.com/go-openapi/errors" + + "github.com/go-openapi/runtime" +) + +const ( + query = "query" + header = "header" + accessTokenParam = "access_token" +) + +// HttpAuthenticator is a function that authenticates a HTTP request +func HttpAuthenticator(handler func(*http.Request) (bool, any, error)) runtime.Authenticator { //nolint:revive + return runtime.AuthenticatorFunc(func(params any) (bool, any, error) { + if request, ok := params.(*http.Request); ok { + return handler(request) + } + if scoped, ok := params.(*ScopedAuthRequest); ok { + return handler(scoped.Request) + } + return false, nil, nil + }) +} + +// ScopedAuthenticator is a function that authenticates a HTTP request against a list of valid scopes +func ScopedAuthenticator(handler func(*ScopedAuthRequest) (bool, any, error)) runtime.Authenticator { + return runtime.AuthenticatorFunc(func(params any) (bool, any, error) { + if request, ok := params.(*ScopedAuthRequest); ok { + return handler(request) + } + return false, nil, nil + }) +} + +// UserPassAuthentication authentication function +type UserPassAuthentication func(string, string) (any, error) + +// UserPassAuthenticationCtx authentication function with context.Context +type 
UserPassAuthenticationCtx func(context.Context, string, string) (context.Context, any, error) + +// TokenAuthentication authentication function +type TokenAuthentication func(string) (any, error) + +// TokenAuthenticationCtx authentication function with context.Context +type TokenAuthenticationCtx func(context.Context, string) (context.Context, any, error) + +// ScopedTokenAuthentication authentication function +type ScopedTokenAuthentication func(string, []string) (any, error) + +// ScopedTokenAuthenticationCtx authentication function with context.Context +type ScopedTokenAuthenticationCtx func(context.Context, string, []string) (context.Context, any, error) + +var DefaultRealmName = "API" + +type secCtxKey uint8 + +const ( + failedBasicAuth secCtxKey = iota + oauth2SchemeName +) + +func FailedBasicAuth(r *http.Request) string { + return FailedBasicAuthCtx(r.Context()) +} + +func FailedBasicAuthCtx(ctx context.Context) string { + v, ok := ctx.Value(failedBasicAuth).(string) + if !ok { + return "" + } + return v +} + +func OAuth2SchemeName(r *http.Request) string { + return OAuth2SchemeNameCtx(r.Context()) +} + +func OAuth2SchemeNameCtx(ctx context.Context) string { + v, ok := ctx.Value(oauth2SchemeName).(string) + if !ok { + return "" + } + return v +} + +// BasicAuth creates a basic auth authenticator with the provided authentication function +func BasicAuth(authenticate UserPassAuthentication) runtime.Authenticator { + return BasicAuthRealm(DefaultRealmName, authenticate) +} + +// BasicAuthRealm creates a basic auth authenticator with the provided authentication function and realm name +func BasicAuthRealm(realm string, authenticate UserPassAuthentication) runtime.Authenticator { + if realm == "" { + realm = DefaultRealmName + } + + return HttpAuthenticator(func(r *http.Request) (bool, any, error) { + if usr, pass, ok := r.BasicAuth(); ok { + p, err := authenticate(usr, pass) + if err != nil { + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + } + return true, p, err + } + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + return false, nil, nil + }) +} + +// BasicAuthCtx creates a basic auth authenticator with the provided authentication function with support for context.Context +func BasicAuthCtx(authenticate UserPassAuthenticationCtx) runtime.Authenticator { + return BasicAuthRealmCtx(DefaultRealmName, authenticate) +} + +// BasicAuthRealmCtx creates a basic auth authenticator with the provided authentication function and realm name with support for context.Context +func BasicAuthRealmCtx(realm string, authenticate UserPassAuthenticationCtx) runtime.Authenticator { + if realm == "" { + realm = DefaultRealmName + } + + return HttpAuthenticator(func(r *http.Request) (bool, any, error) { + if usr, pass, ok := r.BasicAuth(); ok { + ctx, p, err := authenticate(r.Context(), usr, pass) + if err != nil { + ctx = context.WithValue(ctx, failedBasicAuth, realm) + } + *r = *r.WithContext(ctx) + return true, p, err + } + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + return false, nil, nil + }) +} + +// APIKeyAuth creates an authenticator that uses a token for authorization. 
+// This token can be obtained from either a header or a query string +func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authenticator { + inl := strings.ToLower(in) + if inl != query && inl != header { + // panic because this is most likely a typo + panic(errors.New(http.StatusInternalServerError, "api key auth: in value needs to be either \"query\" or \"header\"")) + } + + var getToken func(*http.Request) string + switch inl { + case header: + getToken = func(r *http.Request) string { return r.Header.Get(name) } + case query: + getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } + } + + return HttpAuthenticator(func(r *http.Request) (bool, any, error) { + token := getToken(r) + if token == "" { + return false, nil, nil + } + + p, err := authenticate(token) + return true, p, err + }) +} + +// APIKeyAuthCtx creates an authenticator that uses a token for authorization with support for context.Context. +// This token can be obtained from either a header or a query string +func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime.Authenticator { + inl := strings.ToLower(in) + if inl != query && inl != header { + // panic because this is most likely a typo + panic(errors.New(http.StatusInternalServerError, "api key auth: in value needs to be either \"query\" or \"header\"")) + } + + var getToken func(*http.Request) string + switch inl { + case header: + getToken = func(r *http.Request) string { return r.Header.Get(name) } + case query: + getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } + } + + return HttpAuthenticator(func(r *http.Request) (bool, any, error) { + token := getToken(r) + if token == "" { + return false, nil, nil + } + + ctx, p, err := authenticate(r.Context(), token) + *r = *r.WithContext(ctx) + return true, p, err + }) +} + +// ScopedAuthRequest contains both a http request and the required scopes for a particular operation +type ScopedAuthRequest struct { + Request *http.Request + RequiredScopes []string +} + +// BearerAuth for use with oauth2 flows +func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Authenticator { + const prefix = "Bearer " + return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, any, error) { + var token string + hdr := r.Request.Header.Get(runtime.HeaderAuthorization) + if after, ok := strings.CutPrefix(hdr, prefix); ok { + token = after + } + if token == "" { + qs := r.Request.URL.Query() + token = qs.Get(accessTokenParam) + } + //#nosec + ct, _, _ := runtime.ContentType(r.Request.Header) + if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { + token = r.Request.FormValue(accessTokenParam) + } + + if token == "" { + return false, nil, nil + } + + rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) + *r.Request = *r.Request.WithContext(rctx) + p, err := authenticate(token, r.RequiredScopes) + return true, p, err + }) +} + +// BearerAuthCtx for use with oauth2 flows with support for context.Context. 
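For reference, a hedged sketch of building an API-key authenticator from these helpers; the header name and the `lookup` function are hypothetical stand-ins for application code:

```go
package example

import (
	"github.com/go-openapi/runtime"
	"github.com/go-openapi/runtime/security"
)

// apiKeyAuth authenticates requests by an X-API-Key header. lookup maps
// a raw token to a principal, returning an error for unknown tokens.
func apiKeyAuth(lookup func(token string) (any, error)) runtime.Authenticator {
	return security.APIKeyAuth("X-API-Key", "header", func(token string) (any, error) {
		return lookup(token)
	})
}
```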
+func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runtime.Authenticator { + const prefix = "Bearer " + return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, any, error) { + var token string + hdr := r.Request.Header.Get(runtime.HeaderAuthorization) + if after, ok := strings.CutPrefix(hdr, prefix); ok { + token = after + } + if token == "" { + qs := r.Request.URL.Query() + token = qs.Get(accessTokenParam) + } + //#nosec + ct, _, _ := runtime.ContentType(r.Request.Header) + if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { + token = r.Request.FormValue(accessTokenParam) + } + + if token == "" { + return false, nil, nil + } + + rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) + ctx, p, err := authenticate(rctx, token, r.RequiredScopes) + *r.Request = *r.Request.WithContext(ctx) + return true, p, err + }) +} diff --git a/vendor/github.com/go-openapi/runtime/security/authorizer.go b/vendor/github.com/go-openapi/runtime/security/authorizer.go new file mode 100644 index 00000000000..69bd497a3c2 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/security/authorizer.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package security + +import ( + "net/http" + + "github.com/go-openapi/runtime" +) + +// Authorized provides a default implementation of the Authorizer interface where all +// requests are authorized (successful) +func Authorized() runtime.Authorizer { + return runtime.AuthorizerFunc(func(_ *http.Request, _ any) error { return nil }) +} diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go new file mode 100644 index 00000000000..7e10a5a56c2 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/statuses.go @@ -0,0 +1,79 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +// Statuses lists the most common HTTP status codes to default message +// taken from https://httpstatuses.com/ +var Statuses = map[int]string{ + 100: "Continue", + 101: "Switching Protocols", + 102: "Processing", + 103: "Checkpoint", + 122: "URI too long", + 200: "OK", + 201: "Created", + 202: "Accepted", + 203: "Request Processed", + 204: "No Content", + 205: "Reset Content", + 206: "Partial Content", + 207: "Multi-Status", + 208: "Already Reported", + 226: "IM Used", + 300: "Multiple Choices", + 301: "Moved Permanently", + 302: "Found", + 303: "See Other", + 304: "Not Modified", + 305: "Use Proxy", + 306: "Switch Proxy", + 307: "Temporary Redirect", + 308: "Permanent Redirect", + 400: "Bad Request", + 401: "Unauthorized", + 402: "Payment Required", + 403: "Forbidden", + 404: "Not Found", + 405: "Method Not Allowed", + 406: "Not Acceptable", + 407: "Proxy Authentication Required", + 408: "Request Timeout", + 409: "Conflict", + 410: "Gone", + 411: "Length Required", + 412: "Precondition Failed", + 413: "Request Entity Too Large", + 414: "Request-URI Too Long", + 415: "Unsupported Media Type", + 416: "Request Range Not Satisfiable", + 417: "Expectation Failed", + 418: "I'm a teapot", + 420: "Enhance Your Calm", + 422: "Unprocessable Entity", + 423: "Locked", + 424: "Failed Dependency", + 426: "Upgrade Required", + 428: "Precondition Required", + 429: "Too Many Requests", + 431: "Request Header Fields Too Large", + 444: "No Response", + 449: "Retry With", + 450: "Blocked by Windows Parental 
Controls", + 451: "Wrong Exchange Server", + 499: "Client Closed Request", + 500: "Internal Server Error", + 501: "Not Implemented", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", + 505: "HTTP Version Not Supported", + 506: "Variant Also Negotiates", + 507: "Insufficient Storage", + 508: "Loop Detected", + 509: "Bandwidth Limit Exceeded", + 510: "Not Extended", + 511: "Network Authentication Required", + 598: "Network read timeout error", + 599: "Network connect timeout error", +} diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go new file mode 100644 index 00000000000..2b8e4ac09d0 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/text.go @@ -0,0 +1,105 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag/jsonutils" +) + +// TextConsumer creates a new text consumer +func TextConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data any) error { + if reader == nil { + return errors.New("TextConsumer requires a reader") // early exit + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + // If the buffer is empty, no need to unmarshal it, which causes a panic. + if len(b) == 0 { + return nil + } + + if tu, ok := data.(encoding.TextUnmarshaler); ok { + err := tu.UnmarshalText(b) + if err != nil { + return fmt.Errorf("text consumer: %v", err) + } + + return nil + } + + t := reflect.TypeOf(data) + if data != nil && t.Kind() == reflect.Ptr { + v := reflect.Indirect(reflect.ValueOf(data)) + if t.Elem().Kind() == reflect.String { + v.SetString(string(b)) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s", + data, data, "can be resolved by supporting TextUnmarshaler interface") + }) +} + +// TextProducer creates a new text producer +func TextProducer() Producer { + return ProducerFunc(func(writer io.Writer, data any) error { + if writer == nil { + return errors.New("TextProducer requires a writer") // early exit + } + + if data == nil { + return errors.New("no data given to produce text from") + } + + if tm, ok := data.(encoding.TextMarshaler); ok { + txt, err := tm.MarshalText() + if err != nil { + return fmt.Errorf("text producer: %v", err) + } + _, err = writer.Write(txt) + return err + } + + if str, ok := data.(error); ok { + _, err := writer.Write([]byte(str.Error())) + return err + } + + if str, ok := data.(fmt.Stringer); ok { + _, err := writer.Write([]byte(str.String())) + return err + } + + v := reflect.Indirect(reflect.ValueOf(data)) + if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { + b, err := jsonutils.WriteJSON(data) + if err != nil { + return err + } + _, err = writer.Write(b) + return err + } + if v.Kind() != reflect.String { + return fmt.Errorf("%T is not a supported type by the TextProducer", data) + } + + _, err := writer.Write([]byte(v.String())) + return err + }) +} diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go new file mode 100644 index 00000000000..19894e78451 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/values.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +// Values typically 
represent parameters on a http request. +type Values map[string][]string + +// GetOK returns the values collection for the given key. +// When the key is present in the map it will return true for hasKey. +// When the value is not empty it will return true for hasValue. +func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) { + value, hasKey = v[key] + if !hasKey { + return + } + if len(value) == 0 { + return + } + hasValue = true + return +} diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go new file mode 100644 index 00000000000..5060b5c8e91 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/xml.go @@ -0,0 +1,25 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "encoding/xml" + "io" +) + +// XMLConsumer creates a new XML consumer +func XMLConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data any) error { + dec := xml.NewDecoder(reader) + return dec.Decode(data) + }) +} + +// XMLProducer creates a new XML producer +func XMLProducer() Producer { + return ProducerFunc(func(writer io.Writer, data any) error { + enc := xml.NewEncoder(writer) + return enc.Encode(data) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go new file mode 100644 index 00000000000..ca63430e0b7 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go @@ -0,0 +1,28 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package yamlpc + +import ( + "io" + + "github.com/go-openapi/runtime" + yaml "go.yaml.in/yaml/v3" +) + +// YAMLConsumer creates a consumer for yaml data +func YAMLConsumer() runtime.Consumer { + return runtime.ConsumerFunc(func(r io.Reader, v any) error { + dec := yaml.NewDecoder(r) + return dec.Decode(v) + }) +} + +// YAMLProducer creates a producer for yaml data +func YAMLProducer() runtime.Producer { + return runtime.ProducerFunc(func(w io.Writer, v any) error { + enc := yaml.NewEncoder(w) + defer enc.Close() + return enc.Encode(v) + }) +} diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig new file mode 100644 index 00000000000..3152da69a5d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore new file mode 100644 index 00000000000..f47cb2045f1 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -0,0 +1 @@ +*.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml new file mode 100644 index 00000000000..1ad5adf47e6 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -0,0 +1,75 @@ +version: "2" +linters: + default: all + 
disable: + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - godot + - godox + - gosmopolitan + - inamedparam + - intrange + - ireturn + - lll + - musttag + - nestif + - nlreturn + - nonamedreturns + - noinlineerr + - paralleltest + - recvcheck + - testpackage + - thelper + - tparallel + - unparam + - varnamelen + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md new file mode 100644 index 00000000000..3203bd2556d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/README.md @@ -0,0 +1,58 @@ +# OpenAPI v2 object model [![Build Status](https://github.com/go-openapi/spec/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/spec/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) + +The object model for OpenAPI specification documents. + +## Licensing + +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). + +### FAQ + +* What does this do? + +> 1. This package knows how to marshal and unmarshal Swagger API specifications into a golang object model +> 2. It knows how to resolve $ref and expand them to make a single root document + +* How does it play with the rest of the go-openapi packages ? + +> 1. This package is at the core of the go-openapi suite of packages and [code generator](https://github.com/go-swagger/go-swagger) +> 2. There is a [spec loading package](https://github.com/go-openapi/loads) to fetch specs as JSON or YAML from local or remote locations +> 3. There is a [spec validation package](https://github.com/go-openapi/validate) built on top of it +> 4. There is a [spec analysis package](https://github.com/go-openapi/analysis) built on top of it, to analyze, flatten, fix and merge spec documents + +* Does this library support OpenAPI 3? + +> No. +> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). +> There is no plan to make it evolve toward supporting OpenAPI 3.x. +> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. +> +> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 + +* Does the unmarshaling support YAML? + +> Not directly. The exposed types know only how to unmarshal from JSON. +> +> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by +> github.com/go-openapi/loads +> +> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec +> +> See also https://github.com/go-openapi/spec/issues/164 + +* How can I validate a spec? + +> Validation is provided by [the validate package](http://github.com/go-openapi/validate) + +* Why do we have an `ID` field for `Schema` which is not part of the swagger spec? + +> We found jsonschema compatibility more important: since `id` in jsonschema influences +> how `$ref` are resolved. +> This `id` does not conflict with any property named `id`. 
+> +> See also https://github.com/go-openapi/spec/issues/23 diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go new file mode 100644 index 00000000000..10fba77a839 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/cache.go @@ -0,0 +1,86 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "maps" + "sync" +) + +// ResolutionCache a cache for resolving urls +type ResolutionCache interface { + Get(string) (any, bool) + Set(string, any) +} + +type simpleCache struct { + lock sync.RWMutex + store map[string]any +} + +func (s *simpleCache) ShallowClone() ResolutionCache { + store := make(map[string]any, len(s.store)) + s.lock.RLock() + maps.Copy(store, s.store) + s.lock.RUnlock() + + return &simpleCache{ + store: store, + } +} + +// Get retrieves a cached URI +func (s *simpleCache) Get(uri string) (any, bool) { + s.lock.RLock() + v, ok := s.store[uri] + + s.lock.RUnlock() + return v, ok +} + +// Set caches a URI +func (s *simpleCache) Set(uri string, data any) { + s.lock.Lock() + s.store[uri] = data + s.lock.Unlock() +} + +var ( + // resCache is a package level cache for $ref resolution and expansion. + // It is initialized lazily by methods that have the need for it: no + // memory is allocated unless some expander methods are called. + // + // It is initialized with JSON schema and swagger schema, + // which do not mutate during normal operations. + // + // All subsequent utilizations of this cache are produced from a shallow + // clone of this initial version. + resCache *simpleCache + onceCache sync.Once + + _ ResolutionCache = &simpleCache{} +) + +// initResolutionCache initializes the URI resolution cache. To be wrapped in a sync.Once.Do call. +func initResolutionCache() { + resCache = defaultResolutionCache() +} + +func defaultResolutionCache() *simpleCache { + return &simpleCache{store: map[string]any{ + "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), + "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), + }} +} + +func cacheOrDefault(cache ResolutionCache) ResolutionCache { + onceCache.Do(initResolutionCache) + + if cache != nil { + return cache + } + + // get a shallow clone of the base cache with swagger and json schema + return resCache.ShallowClone() +} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go new file mode 100644 index 00000000000..fafe639b45d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/contact_info.go @@ -0,0 +1,46 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag/jsonutils" +) + +// ContactInfo contact information for the exposed API. 
+// +// For more information: http://goo.gl/8us55a#contactObject +type ContactInfo struct { + ContactInfoProps + VendorExtensible +} + +// ContactInfoProps hold the properties of a ContactInfo object +type ContactInfoProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` + Email string `json:"email,omitempty"` +} + +// UnmarshalJSON hydrates ContactInfo from json +func (c *ContactInfo) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &c.ContactInfoProps); err != nil { + return err + } + return json.Unmarshal(data, &c.VendorExtensible) +} + +// MarshalJSON produces ContactInfo as json +func (c ContactInfo) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(c.ContactInfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(c.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go new file mode 100644 index 00000000000..f4316c26333 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/debug.go @@ -0,0 +1,38 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "fmt" + "log" + "os" + "path" + "runtime" +) + +// Debug is true when the SWAGGER_DEBUG env var is not empty. +// +// It enables a more verbose logging of this package. +var Debug = os.Getenv("SWAGGER_DEBUG") != "" + +var ( + // specLogger is a debug logger for this package + specLogger *log.Logger +) + +func init() { + debugOptions() +} + +func debugOptions() { + specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) +} + +func debugLog(msg string, args ...any) { + // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() + if Debug { + _, file1, pos1, _ := runtime.Caller(1) + specLogger.Printf("%s:%d: %s", path.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/go-openapi/spec/embed.go b/vendor/github.com/go-openapi/spec/embed.go new file mode 100644 index 00000000000..0d0b69996cc --- /dev/null +++ b/vendor/github.com/go-openapi/spec/embed.go @@ -0,0 +1,20 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "embed" + "path" +) + +//go:embed schemas/*.json schemas/*/*.json +var assets embed.FS + +func jsonschemaDraft04JSONBytes() ([]byte, error) { + return assets.ReadFile(path.Join("schemas", "jsonschema-draft-04.json")) +} + +func v2SchemaJSONBytes() ([]byte, error) { + return assets.ReadFile(path.Join("schemas", "v2", "schema.json")) +} diff --git a/vendor/github.com/go-openapi/spec/errors.go b/vendor/github.com/go-openapi/spec/errors.go new file mode 100644 index 00000000000..e39ab8bf71e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/errors.go @@ -0,0 +1,25 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import "errors" + +// Error codes +var ( + // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type + ErrUnknownTypeForReference = errors.New("unknown type for the resolved reference") + + // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer + ErrResolveRefNeedsAPointer = errors.New("resolve ref: target needs to be a pointer") + + // ErrDerefUnsupportedType indicates that a resolved reference was 
found in an unsupported container type. + // At the moment, $ref are supported only inside: schemas, parameters, responses, path items + ErrDerefUnsupportedType = errors.New("deref: unsupported type") + + // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type + ErrExpandUnsupportedType = errors.New("expand: unsupported type. Input should be of type *Parameter or *Response") + + // ErrSpec is an error raised by the spec package + ErrSpec = errors.New("spec error") +) diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go new file mode 100644 index 00000000000..cc4bd1cba1b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -0,0 +1,598 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "fmt" +) + +const smallPrealloc = 10 + +// ExpandOptions provides options for the spec expander. +// +// RelativeBase is the path to the root document. This can be a remote URL or a path to a local file. +// +// If left empty, the root document is assumed to be located in the current working directory: +// all relative $ref's will be resolved from there. +// +// PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable. +type ExpandOptions struct { + RelativeBase string // the path to the root document to expand. This is a file, not a directory + SkipSchemas bool // do not expand schemas, just paths, parameters and responses + ContinueOnError bool // continue expanding even after an error is found + PathLoader func(string) (json.RawMessage, error) `json:"-"` // the document loading method that takes a path as input and yields a json document + AbsoluteCircularRef bool // circular $ref remaining after expansion remain absolute URLs +} + +func optionsOrDefault(opts *ExpandOptions) *ExpandOptions { + if opts != nil { + clone := *opts // shallow clone to avoid internal changes to be propagated to the caller + if clone.RelativeBase != "" { + clone.RelativeBase = normalizeBase(clone.RelativeBase) + } + // if the relative base is empty, let the schema loader choose a pseudo root document + return &clone + } + return &ExpandOptions{} +} + +// ExpandSpec expands the references in a swagger spec +func ExpandSpec(spec *Swagger, options *ExpandOptions) error { + options = optionsOrDefault(options) + resolver := defaultSchemaLoader(spec, options, nil, nil) + + specBasePath := options.RelativeBase + + if !options.SkipSchemas { + for key, definition := range spec.Definitions { + parentRefs := make([]string, 0, smallPrealloc) + parentRefs = append(parentRefs, "#/definitions/"+key) + + def, err := expandSchema(definition, parentRefs, resolver, specBasePath) + if resolver.shouldStopOnError(err) { + return err + } + if def != nil { + spec.Definitions[key] = *def + } + } + } + + for key := range spec.Parameters { + parameter := spec.Parameters[key] + if err := expandParameterOrResponse(&parameter, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Parameters[key] = parameter + } + + for key := range spec.Responses { + response := spec.Responses[key] + if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Responses[key] = response + } + + if spec.Paths != nil { + for key := range spec.Paths.Paths { + pth := spec.Paths.Paths[key] + if err :=
expandPathItem(&pth, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Paths.Paths[key] = pth + } + } + + return nil +} + +const rootBase = ".root" + +// baseForRoot loads in the cache the root document and produces a fake ".root" base path entry +// for further $ref resolution +func baseForRoot(root any, cache ResolutionCache) string { + // cache the root document to resolve $ref's + normalizedBase := normalizeBase(rootBase) + + if root == nil { + // ensure that we never leave a nil root: always cache the root base pseudo-document + cachedRoot, found := cache.Get(normalizedBase) + if found && cachedRoot != nil { + // the cache is already preloaded with a root + return normalizedBase + } + + root = map[string]any{} + } + + cache.Set(normalizedBase, root) + + return normalizedBase +} + +// ExpandSchema expands the refs in the schema object with reference to the root object. +// +// go-openapi/validate uses this function. +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandSchemaWithBasePath to resolve external references). +// +// Setting the cache is optional and this parameter may safely be left to nil. +func ExpandSchema(schema *Schema, root any, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + if root == nil { + root = schema + } + + opts := &ExpandOptions{ + // when a root is specified, cache the root as an in-memory document for $ref retrieval + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + } + + return ExpandSchemaWithBasePath(schema, cache, opts) +} + +// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options. +// +// Setting the cache is optional and this parameter may safely be left to nil. 
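The expansion entry points above combine as in this minimal sketch; the swagger.json path and the surrounding program are assumptions for illustration, not part of the vendored code:

```go
package main

import (
	"encoding/json"
	"log"
	"os"

	"github.com/go-openapi/spec"
)

func main() {
	// Hypothetical input: a Swagger 2.0 document on disk.
	raw, err := os.ReadFile("swagger.json")
	if err != nil {
		log.Fatal(err)
	}

	var doc spec.Swagger
	if err := json.Unmarshal(raw, &doc); err != nil {
		log.Fatal(err)
	}

	// Expand all $ref's; relative references resolve against the document's own path.
	opts := &spec.ExpandOptions{RelativeBase: "swagger.json"}
	if err := spec.ExpandSpec(&doc, opts); err != nil {
		log.Fatal(err)
	}

	log.Printf("expanded spec with %d definitions", len(doc.Definitions))
}
```

ExpandSchema and ExpandSchemaWithBasePath follow the same pattern for a single schema rather than a whole document.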
+func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { + if schema == nil { + return nil + } + + cache = cacheOrDefault(cache) + + opts = optionsOrDefault(opts) + + resolver := defaultSchemaLoader(nil, opts, cache, nil) + + parentRefs := make([]string, 0, smallPrealloc) + s, err := expandSchema(*schema, parentRefs, resolver, opts.RelativeBase) + if err != nil { + return err + } + if s != nil { + // guard for when continuing on error + *schema = *s + } + + return nil +} + +func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Items == nil { + return &target, nil + } + + // array + if target.Items.Schema != nil { + t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + *target.Items.Schema = *t + } + + // tuple + for i := range target.Items.Schemas { + t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + target.Items.Schemas[i] = *t + } + + return &target, nil +} + +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Ref.String() == "" && target.Ref.IsRoot() { + newRef := normalizeRef(&target.Ref, basePath) + target.Ref = *newRef + return &target, nil + } + + // change the base path of resolution when an ID is encountered + // otherwise the basePath should inherit the parent's + if target.ID != "" { + basePath, _ = resolver.setSchemaID(target, target.ID, basePath) + } + + if target.Ref.String() != "" { + if !resolver.options.SkipSchemas { + return expandSchemaRef(target, parentRefs, resolver, basePath) + } + + // when "expand" with SkipSchema, we just rebase the existing $ref without replacing + // the full schema. 
+ rebasedRef, err := NewRef(normalizeURI(target.Ref.String(), basePath)) + if err != nil { + return nil, err + } + target.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + + return &target, nil + } + + for k := range target.Definitions { + tt, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if tt != nil { + target.Definitions[k] = *tt + } + } + + t, err := expandItems(target, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target = *t + } + + for i := range target.AllOf { + t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AllOf[i] = *t + } + } + + for i := range target.AnyOf { + t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AnyOf[i] = *t + } + } + + for i := range target.OneOf { + t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.OneOf[i] = *t + } + } + + if target.Not != nil { + t, err := expandSchema(*target.Not, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Not = *t + } + } + + for k := range target.Properties { + t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.Properties[k] = *t + } + } + + if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { + t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalProperties.Schema = *t + } + } + + for k := range target.PatternProperties { + t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.PatternProperties[k] = *t + } + } + + for k := range target.Dependencies { + if target.Dependencies[k].Schema != nil { + t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Dependencies[k].Schema = *t + } + } + } + + if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { + t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalItems.Schema = *t + } + } + return &target, nil +} + +func expandSchemaRef(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + // if a Ref is found, all sibling fields are skipped + // Ref also changes the resolution scope of children expandSchema + + // here the resolution scope is changed because a $ref was encountered + normalizedRef := normalizeRef(&target.Ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if resolver.isCircular(normalizedRef, basePath, parentRefs...) { + // this means there is a cycle in the recursion tree: return the Ref + // - circular refs cannot be expanded. 
We leave them as ref. + // - denormalization means that a new local file ref is set relative to the original basePath + debugLog("short circuit circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s", + basePath, normalizedBasePath, normalizedRef.String()) + if !resolver.options.AbsoluteCircularRef { + target.Ref = denormalizeRef(normalizedRef, resolver.context.basePath, resolver.context.rootID) + } else { + target.Ref = *normalizedRef + } + return &target, nil + } + + var t *Schema + err := resolver.Resolve(&target.Ref, &t, basePath) + if resolver.shouldStopOnError(err) { + return nil, err + } + + if t == nil { + // guard for when continuing on error + return &target, nil + } + + parentRefs = append(parentRefs, normalizedRef.String()) + transitiveResolver := resolver.transitiveResolver(basePath, target.Ref) + + basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath) + + return expandSchema(*t, parentRefs, transitiveResolver, basePath) +} + +func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error { + if pathItem == nil { + return nil + } + + parentRefs := make([]string, 0, smallPrealloc) + if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + if pathItem.Ref.String() != "" { + transitiveResolver := resolver.transitiveResolver(basePath, pathItem.Ref) + basePath = transitiveResolver.updateBasePath(resolver, basePath) + resolver = transitiveResolver + } + + pathItem.Ref = Ref{} + for i := range pathItem.Parameters { + if err := expandParameterOrResponse(&(pathItem.Parameters[i]), resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + } + + ops := []*Operation{ + pathItem.Get, + pathItem.Head, + pathItem.Options, + pathItem.Put, + pathItem.Post, + pathItem.Patch, + pathItem.Delete, + } + for _, op := range ops { + if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + } + + return nil +} + +func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error { + if op == nil { + return nil + } + + for i := range op.Parameters { + param := op.Parameters[i] + if err := expandParameterOrResponse(&param, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + op.Parameters[i] = param + } + + if op.Responses == nil { + return nil + } + + responses := op.Responses + if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + + for code := range responses.StatusCodeResponses { + response := responses.StatusCodeResponses[code] + if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + responses.StatusCodeResponses[code] = response + } + + return nil +} + +// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandResponse to resolve external references). +// +// Setting the cache is optional and this parameter may safely be left to nil.
+func ExpandResponseWithRoot(response *Response, root any, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandResponse expands a response based on a basepath +// +// All refs inside response will be resolved relative to basePath +func ExpandResponse(response *Response, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document. +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandParameter to resolve external references). +func ExpandParameterWithRoot(parameter *Parameter, root any, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +// ExpandParameter expands a parameter based on a basepath. +// This is the exported version of expandParameter +// all refs inside parameter will be resolved relative to basePath +func ExpandParameter(parameter *Parameter, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +func getRefAndSchema(input any) (*Ref, *Schema, error) { + var ( + ref *Ref + sch *Schema + ) + + switch refable := input.(type) { + case *Parameter: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + case *Response: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + default: + return nil, nil, fmt.Errorf("unsupported type: %T: %w", input, ErrExpandUnsupportedType) + } + + return ref, sch, nil +} + +func expandParameterOrResponse(input any, resolver *schemaLoader, basePath string) error { + ref, sch, err := getRefAndSchema(input) + if err != nil { + return err + } + + if ref == nil && sch == nil { // nothing to do + return nil + } + + parentRefs := make([]string, 0, smallPrealloc) + if ref != nil { + // dereference this $ref + if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + ref, sch, _ = getRefAndSchema(input) + } + + if ref.String() != "" { + transitiveResolver := resolver.transitiveResolver(basePath, *ref) + basePath = resolver.updateBasePath(transitiveResolver, basePath) + resolver = transitiveResolver + } + + if sch == nil { + // nothing to be expanded + if ref != nil { + *ref = Ref{} + } + + return nil + } + + if sch.Ref.String() != "" { + rebasedRef, ern := NewRef(normalizeURI(sch.Ref.String(), basePath)) + if ern != nil { + return ern + } + + if resolver.isCircular(&rebasedRef, basePath, parentRefs...) 
{ + // this is a circular $ref: stop expansion + if !resolver.options.AbsoluteCircularRef { + sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + } else { + sch.Ref = rebasedRef + } + } + } + + // $ref expansion or rebasing is performed by expandSchema below + if ref != nil { + *ref = Ref{} + } + + // expand schema + // yes, we do it even if options.SkipSchema is true: we have to go down that rabbit hole and rebase nested $ref) + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return err + } + + if s != nil { // guard for when continuing on error + *sch = *s + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go new file mode 100644 index 00000000000..17b8efbf100 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/external_docs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +// ExternalDocumentation allows referencing an external resource for +// extended documentation. +// +// For more information: http://goo.gl/8us55a#externalDocumentationObject +type ExternalDocumentation struct { + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go new file mode 100644 index 00000000000..ab251ef7659 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/header.go @@ -0,0 +1,192 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +const ( + jsonArray = "array" +) + +// HeaderProps describes a response header +type HeaderProps struct { + Description string `json:"description,omitempty"` +} + +// Header describes a header for a response of the API +// +// For more information: http://goo.gl/8us55a#headerObject +type Header struct { + CommonValidations + SimpleSchema + VendorExtensible + HeaderProps +} + +// ResponseHeader creates a new header instance for use in a response +func ResponseHeader() *Header { + return new(Header) +} + +// WithDescription sets the description on this response, allows for chaining +func (h *Header) WithDescription(description string) *Header { + h.Description = description + return h +} + +// Typed a fluent builder method for the type of parameter +func (h *Header) Typed(tpe, format string) *Header { + h.Type = tpe + h.Format = format + return h +} + +// CollectionOf a fluent builder method for an array item +func (h *Header) CollectionOf(items *Items, format string) *Header { + h.Type = jsonArray + h.Items = items + h.CollectionFormat = format + return h +} + +// WithDefault sets the default value on this item +func (h *Header) WithDefault(defaultValue any) *Header { + h.Default = defaultValue + return h +} + +// WithMaxLength sets a max length value +func (h *Header) WithMaxLength(maximum int64) *Header { + h.MaxLength = &maximum + return h +} + +// WithMinLength sets a min length value +func (h *Header) WithMinLength(minimum int64) *Header { + h.MinLength = &minimum + return h +} + +// WithPattern sets a pattern value +func (h *Header) WithPattern(pattern string) *Header { + h.Pattern = pattern + return h +} + +// WithMultipleOf sets a multiple of value +func 
(h *Header) WithMultipleOf(number float64) *Header { + h.MultipleOf = &number + return h +} + +// WithMaximum sets a maximum number value +func (h *Header) WithMaximum(maximum float64, exclusive bool) *Header { + h.Maximum = &maximum + h.ExclusiveMaximum = exclusive + return h +} + +// WithMinimum sets a minimum number value +func (h *Header) WithMinimum(minimum float64, exclusive bool) *Header { + h.Minimum = &minimum + h.ExclusiveMinimum = exclusive + return h +} + +// WithEnum sets the enum values (replace) +func (h *Header) WithEnum(values ...any) *Header { + h.Enum = append([]any{}, values...) + return h +} + +// WithMaxItems sets the max items +func (h *Header) WithMaxItems(size int64) *Header { + h.MaxItems = &size + return h +} + +// WithMinItems sets the min items +func (h *Header) WithMinItems(size int64) *Header { + h.MinItems = &size + return h +} + +// UniqueValues dictates that this array can only have unique items +func (h *Header) UniqueValues() *Header { + h.UniqueItems = true + return h +} + +// AllowDuplicates this array can have duplicates +func (h *Header) AllowDuplicates() *Header { + h.UniqueItems = false + return h +} + +// WithValidations is a fluent method to set header validations +func (h *Header) WithValidations(val CommonValidations) *Header { + h.SetValidations(SchemaValidations{CommonValidations: val}) + return h +} + +// MarshalJSON marshals this to JSON +func (h Header) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(h.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2, b3), nil +} + +// UnmarshalJSON unmarshals this header from JSON +func (h *Header) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &h.HeaderProps) +} + +// JSONLookup look up a value by the json property name +func (h Header) JSONLookup(token string) (any, error) { + if ex, ok := h.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go new file mode 100644 index 00000000000..9401065bbde --- /dev/null +++ b/vendor/github.com/go-openapi/spec/info.go @@ -0,0 +1,173 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +// Extensions vendor specific extensions +type Extensions map[string]any + +// Add adds a value to these extensions +func (e Extensions) Add(key string, value any) { + realKey := strings.ToLower(key) + e[realKey] = value +} + +//
GetString gets a string value from the extensions +func (e Extensions) GetString(key string) (string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(string) + return str, ok + } + return "", false +} + +// GetInt gets an int value from the extensions +func (e Extensions) GetInt(key string) (int, bool) { + realKey := strings.ToLower(key) + + if v, ok := e.GetString(realKey); ok { + if r, err := strconv.Atoi(v); err == nil { + return r, true + } + } + + if v, ok := e[realKey]; ok { + if r, rOk := v.(float64); rOk { + return int(r), true + } + } + return -1, false +} + +// GetBool gets a bool value from the extensions +func (e Extensions) GetBool(key string) (bool, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(bool) + return str, ok + } + return false, false +} + +// GetStringSlice gets a string slice from the extensions +func (e Extensions) GetStringSlice(key string) ([]string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + arr, isSlice := v.([]any) + if !isSlice { + return nil, false + } + var strs []string + for _, iface := range arr { + str, isString := iface.(string) + if !isString { + return nil, false + } + strs = append(strs, str) + } + return strs, ok + } + return nil, false +} + +// VendorExtensible composition block. +type VendorExtensible struct { + Extensions Extensions +} + +// AddExtension adds an extension to this extensible object +func (v *VendorExtensible) AddExtension(key string, value any) { + if value == nil { + return + } + if v.Extensions == nil { + v.Extensions = make(map[string]any) + } + v.Extensions.Add(key, value) +} + +// MarshalJSON marshals the extensions to json +func (v VendorExtensible) MarshalJSON() ([]byte, error) { + toser := make(map[string]any) + for k, v := range v.Extensions { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + toser[k] = v + } + } + return json.Marshal(toser) +} + +// UnmarshalJSON for this extensible object +func (v *VendorExtensible) UnmarshalJSON(data []byte) error { + var d map[string]any + if err := json.Unmarshal(data, &d); err != nil { + return err + } + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if v.Extensions == nil { + v.Extensions = map[string]any{} + } + v.Extensions[k] = vv + } + } + return nil +} + +// InfoProps the properties for an info definition +type InfoProps struct { + Description string `json:"description,omitempty"` + Title string `json:"title,omitempty"` + TermsOfService string `json:"termsOfService,omitempty"` + Contact *ContactInfo `json:"contact,omitempty"` + License *License `json:"license,omitempty"` + Version string `json:"version,omitempty"` +}
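+ +// Example (sketch): only keys prefixed with "x-" survive marshaling, and +// AddExtension silently drops nil values. The name exampleVendorExtensions is +// illustrative only. +func exampleVendorExtensions() ([]byte, error) { + var ve VendorExtensible + ve.AddExtension("x-internal", true) + ve.AddExtension("x-audit", nil) // ignored: nil values are never stored + return json.Marshal(ve) // {"x-internal":true} +} + +// Info object provides metadata about the API. +// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience. 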
+// +// For more information: http://goo.gl/8us55a#infoObject +type Info struct { + VendorExtensible + InfoProps +} + +// JSONLookup look up a value by the json property name +func (i Info) JSONLookup(token string) (any, error) { + if ex, ok := i.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(i.InfoProps, token) + return r, err +} + +// MarshalJSON marshals this to JSON +func (i Info) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.InfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals this from JSON +func (i *Info) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &i.InfoProps); err != nil { + return err + } + return json.Unmarshal(data, &i.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go new file mode 100644 index 00000000000..d30ca3569b1 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/items.go @@ -0,0 +1,223 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +const ( + jsonRef = "$ref" +) + +// SimpleSchema describes swagger simple schemas for parameters and headers +type SimpleSchema struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitempty"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default any `json:"default,omitempty"` + Example any `json:"example,omitempty"` +} + +// TypeName returns the type (or format) of a simple schema +func (s *SimpleSchema) TypeName() string { + if s.Format != "" { + return s.Format + } + return s.Type +} + +// ItemsTypeName yields the type of items in a simple schema array +func (s *SimpleSchema) ItemsTypeName() string { + if s.Items == nil { + return "" + } + return s.Items.TypeName() +} + +// Items a limited subset of JSON-Schema's items object. +// It is used by parameter definitions that are not located in "body". +// +// For more information: http://goo.gl/8us55a#items-object +type Items struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible +} + +// NewItems creates a new instance of items +func NewItems() *Items { + return &Items{} +} + +// Typed a fluent builder method for the type of item +func (i *Items) Typed(tpe, format string) *Items { + i.Type = tpe + i.Format = format + return i +} + +// AsNullable flags this schema as nullable. 
+func (i *Items) AsNullable() *Items { + i.Nullable = true + return i +} + +// CollectionOf a fluent builder method for an array item +func (i *Items) CollectionOf(items *Items, format string) *Items { + i.Type = jsonArray + i.Items = items + i.CollectionFormat = format + return i +} + +// WithDefault sets the default value on this item +func (i *Items) WithDefault(defaultValue any) *Items { + i.Default = defaultValue + return i +} + +// WithMaxLength sets a max length value +func (i *Items) WithMaxLength(maximum int64) *Items { + i.MaxLength = &maximum + return i +} + +// WithMinLength sets a min length value +func (i *Items) WithMinLength(minimum int64) *Items { + i.MinLength = &minimum + return i +} + +// WithPattern sets a pattern value +func (i *Items) WithPattern(pattern string) *Items { + i.Pattern = pattern + return i +} + +// WithMultipleOf sets a multiple of value +func (i *Items) WithMultipleOf(number float64) *Items { + i.MultipleOf = &number + return i +} + +// WithMaximum sets a maximum number value +func (i *Items) WithMaximum(maximum float64, exclusive bool) *Items { + i.Maximum = &maximum + i.ExclusiveMaximum = exclusive + return i +} + +// WithMinimum sets a minimum number value +func (i *Items) WithMinimum(minimum float64, exclusive bool) *Items { + i.Minimum = &minimum + i.ExclusiveMinimum = exclusive + return i +} + +// WithEnum sets the enum values (replace) +func (i *Items) WithEnum(values ...any) *Items { + i.Enum = append([]any{}, values...) + return i +} + +// WithMaxItems sets the max items +func (i *Items) WithMaxItems(size int64) *Items { + i.MaxItems = &size + return i +} + +// WithMinItems sets the min items +func (i *Items) WithMinItems(size int64) *Items { + i.MinItems = &size + return i +} + +// UniqueValues dictates that this array can only have unique items +func (i *Items) UniqueValues() *Items { + i.UniqueItems = true + return i +} + +// AllowDuplicates this array can have duplicates +func (i *Items) AllowDuplicates() *Items { + i.UniqueItems = false + return i +} + +// WithValidations is a fluent method to set Items validations +func (i *Items) WithValidations(val CommonValidations) *Items { + i.SetValidations(SchemaValidations{CommonValidations: val}) + return i +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (i *Items) UnmarshalJSON(data []byte) error { + var validations CommonValidations + if err := json.Unmarshal(data, &validations); err != nil { + return err + } + var ref Refable + if err := json.Unmarshal(data, &ref); err != nil { + return err + } + var simpleSchema SimpleSchema + if err := json.Unmarshal(data, &simpleSchema); err != nil { + return err + } + var vendorExtensible VendorExtensible + if err := json.Unmarshal(data, &vendorExtensible); err != nil { + return err + } + i.Refable = ref + i.CommonValidations = validations + i.SimpleSchema = simpleSchema + i.VendorExtensible = vendorExtensible + return nil +} + +// MarshalJSON converts this items object to JSON +func (i Items) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(i.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b4, b3, b1, b2), nil +}
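+ +// Example (sketch): a comma-separated array of unique strings built with the +// fluent API above. The name exampleCSVItems is illustrative only. +func exampleCSVItems() *Items { + return NewItems(). + CollectionOf(NewItems().Typed("string", ""), "csv"). + WithMinItems(1). + UniqueValues() +} + +// JSONLookup look up a value by the json property name +func (i Items) JSONLookup(token string) (any, error) { +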
if token == jsonRef { + return &i.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go new file mode 100644 index 00000000000..286b237e2bf --- /dev/null +++ b/vendor/github.com/go-openapi/spec/license.go @@ -0,0 +1,45 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag/jsonutils" +) + +// License information for the exposed API. +// +// For more information: http://goo.gl/8us55a#licenseObject +type License struct { + LicenseProps + VendorExtensible +} + +// LicenseProps holds the properties of a License object +type LicenseProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} + +// UnmarshalJSON hydrates License from json +func (l *License) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &l.LicenseProps); err != nil { + return err + } + return json.Unmarshal(data, &l.VendorExtensible) +} + +// MarshalJSON produces License as json +func (l License) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(l.LicenseProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(l.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go new file mode 100644 index 00000000000..c3ea810aaf3 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -0,0 +1,191 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "net/url" + "path" + "strings" +) + +const fileScheme = "file" + +// normalizeURI ensures that all $ref paths used internally by the expander are canonicalized. +// +// NOTE(windows): there is a tolerance over the strict URI format on windows. +// +// The normalizer accepts relative file URLs like 'Path\File.JSON' as well as absolute file URLs like +// 'C:\Path\file.Yaml'. +// +// Both are canonicalized with a "file://" scheme, slashes and a lower-cased path: +// 'file:///c:/path/file.yaml' +// +// URLs can be specified with a file scheme, like in 'file:///folder/file.json' or +// 'file:///c:\folder\File.json'. +// +// URLs like file://C:\folder are considered invalid (i.e. there is no host 'c:\folder') and a "repair" +// is attempted. +// +// The base path argument is assumed to be canonicalized (e.g. using normalizeBase()). +func normalizeURI(refPath, base string) string { + refURL, err := parseURL(refPath) + if err != nil { + specLogger.Printf("warning: invalid URI in $ref %q: %v", refPath, err) + refURL, refPath = repairURI(refPath) + } + + fixWindowsURI(refURL, refPath) // noop on non-windows OS + + refURL.Path = path.Clean(refURL.Path) + if refURL.Path == "." 
{ + refURL.Path = "" + } + + r := MustCreateRef(refURL.String()) + if r.IsCanonical() { + return refURL.String() + } + + baseURL, _ := parseURL(base) + if path.IsAbs(refURL.Path) { + baseURL.Path = refURL.Path + } else if refURL.Path != "" { + baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) + } + // copying fragment from ref to base + baseURL.Fragment = refURL.Fragment + + return baseURL.String() +} + +// denormalizeRef returns the simplest notation for a normalized $ref, given the path of the original root document. +// +// When calling this, we assume that: +// * $ref is a canonical URI +// * originalRelativeBase is a canonical URI +// +// denormalizeRef is currently used when we rewrite a $ref after a circular $ref has been detected. +// In this case, expansion stops and normally renders the internal canonical $ref. +// +// This internal $ref is eventually rebased to the original RelativeBase used for the expansion. +// +// There is a special case for schemas that are anchored with an "id": +// in that case, the rebasing is performed against the id only if this is an anchor for the initial root document. +// All other intermediate "id"'s found along the way are ignored for the purpose of rebasing. +func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref { + debugLog("denormalizeRef called:\n$ref: %q\noriginal: %s\nroot ID:%s", ref.String(), originalRelativeBase, id) + + if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { + // short circuit: $ref to current doc + return *ref + } + + if id != "" { + idBaseURL, err := parseURL(id) + if err == nil { // if the schema id is not usable as a URI, ignore it + if ref, ok := rebase(ref, idBaseURL, true); ok { // rebase, but keep references to root unchanged (do not want $ref: "") + // $ref relative to the ID of the schema in the root document + return ref + } + } + } + + originalRelativeBaseURL, _ := parseURL(originalRelativeBase) + + r, _ := rebase(ref, originalRelativeBaseURL, false) + + return r +} + +func rebase(ref *Ref, v *url.URL, notEqual bool) (Ref, bool) { + var newBase url.URL + + u := ref.GetURL() + + if u.Scheme != v.Scheme || u.Host != v.Host { + return *ref, false + } + + docPath := v.Path + v.Path = path.Dir(v.Path) + + if v.Path == "." { + v.Path = "" + } else if !strings.HasSuffix(v.Path, "/") { + v.Path += "/" + } + + newBase.Fragment = u.Fragment + + if after, ok := strings.CutPrefix(u.Path, docPath); ok { + newBase.Path = after + } else { + newBase.Path = strings.TrimPrefix(u.Path, v.Path) + } + + if notEqual && newBase.Path == "" && newBase.Fragment == "" { + // do not want rebasing to end up in an empty $ref + return *ref, false + } + + if path.IsAbs(newBase.Path) { + // whenever we end up with an absolute path, specify the scheme and host + newBase.Scheme = v.Scheme + newBase.Host = v.Host + } + + return MustCreateRef(newBase.String()), true +} + +// normalizeRef canonicalizes a Ref, using a canonical relativeBase as its absolute anchor +func normalizeRef(ref *Ref, relativeBase string) *Ref { + r := MustCreateRef(normalizeURI(ref.String(), relativeBase)) + return &r +} + +// normalizeBase performs a normalization of the input base path. +// +// This always yields a canonical URI (absolute), usable for the document cache. +// +// It ensures that all further internal work on basePath may safely assume +// a non-empty, cross-platform, canonical URI (i.e. absolute). +// +// This normalization tolerates windows paths (e.g. 
C:\x\y\File.dat) and transforms this +// into a file:// URL with lower cased drive letter and path. +// +// See also: https://en.wikipedia.org/wiki/File_URI_scheme +func normalizeBase(in string) string { + u, err := parseURL(in) + if err != nil { + specLogger.Printf("warning: invalid URI in RelativeBase %q: %v", in, err) + u, in = repairURI(in) + } + + u.Fragment = "" // any fragment in the base is irrelevant + + fixWindowsURI(u, in) // noop on non-windows OS + + u.Path = path.Clean(u.Path) + if u.Path == "." { // empty after Clean() + u.Path = "" + } + + if u.Scheme != "" { + if path.IsAbs(u.Path) || u.Scheme != fileScheme { + // this is absolute or explicitly not a local file: we're good + return u.String() + } + } + + // no scheme or file scheme with relative path: assume file and make it absolute + // enforce scheme file://... with absolute path. + // + // If the input path is relative, we anchor the path to the current working directory. + // NOTE: we may end up with a host component. Leave it unchanged: e.g. file://host/folder/file.json + + u.Scheme = fileScheme + u.Path = absPath(u.Path) // platform-dependent + u.RawQuery = "" // any query component is irrelevant for a base + return u.String() +} diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go new file mode 100644 index 00000000000..0d55632349f --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go @@ -0,0 +1,32 @@ +//go:build !windows + +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "net/url" + "path/filepath" +) + +// absPath makes a file path absolute and compatible with a URI path component. +// +// The parameter must be a path, not a URI. +func absPath(in string) string { + anchored, err := filepath.Abs(in) + if err != nil { + specLogger.Printf("warning: could not resolve current working directory: %v", err) + return in + } + return anchored +} + +func repairURI(in string) (*url.URL, string) { + u, _ := parseURL("") + debugLog("repaired URI: original: %q, repaired: %q", in, "") + return u, "" +} + +func fixWindowsURI(_ *url.URL, _ string) { +} diff --git a/vendor/github.com/go-openapi/spec/normalizer_windows.go b/vendor/github.com/go-openapi/spec/normalizer_windows.go new file mode 100644 index 00000000000..61515c9a163 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer_windows.go @@ -0,0 +1,143 @@ +//go:build windows + +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "net/url" + "os" + "path" + "path/filepath" + "strings" +) + +// absPath makes a file path absolute and compatible with a URI path component +// +// The parameter must be a path, not a URI. +func absPath(in string) string { + // NOTE(windows): filepath.Abs exhibits a special behavior on windows for empty paths. + // See https://github.com/golang/go/issues/24441 + if in == "" { + in = "." + } + + anchored, err := filepath.Abs(in) + if err != nil { + specLogger.Printf("warning: could not resolve current working directory: %v", err) + return in + } + + pth := strings.ReplaceAll(strings.ToLower(anchored), `\`, `/`) + if !strings.HasPrefix(pth, "/") { + pth = "/" + pth + } + + return path.Clean(pth) +}
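+ +// Sketch of the expected canonicalization (illustrative values, per the +// package docs above): +// +// absPath(`C:\Base\File.yaml`) // "/c:/base/file.yaml" +// normalizeBase(`C:\Base\File.yaml`) // "file:///c:/base/file.yaml" +// +// i.e. the drive letter is lower-cased, backslashes become slashes and the +// result is anchored with the file scheme. + +// repairURI tolerates invalid file URIs with common typos +// such as 'file://E:\folder\file', that break the regular URL parser. 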
+// +// Adopting the same defaults as for unixes (e.g. return an empty path) would +// result into a counter-intuitive result for that case (e.g. E:\folder\file is +// eventually resolved as the current directory). The repair will detect the missing "/". +// +// Note that this only works for the file scheme. +func repairURI(in string) (*url.URL, string) { + const prefix = fileScheme + "://" + if !strings.HasPrefix(in, prefix) { + // giving up: resolve to empty path + u, _ := parseURL("") + + return u, "" + } + + // attempt the repair, stripping the scheme should be sufficient + u, _ := parseURL(strings.TrimPrefix(in, prefix)) + debugLog("repaired URI: original: %q, repaired: %q", in, u.String()) + + return u, u.String() +} + +// fixWindowsURI tolerates an absolute file path on windows such as C:\Base\File.yaml or \\host\share\Base\File.yaml +// and makes it a canonical URI: file:///c:/base/file.yaml +// +// Catch 22 notes for Windows: +// +// * There may be a drive letter on windows (it is lower-cased) +// * There may be a share UNC, e.g. \\server\folder\data.xml +// * Paths are case insensitive +// * Paths may already contain slashes +// * Paths must be slashed +// +// NOTE: there is no escaping. "/" may be valid separators just like "\". +// We don't use ToSlash() (which escapes everything) because windows now also +// tolerates the use of "/". Hence, both C:\File.yaml and C:/File.yaml will work. +func fixWindowsURI(u *url.URL, in string) { + drive := filepath.VolumeName(in) + + if len(drive) > 0 { + if len(u.Scheme) == 1 && strings.EqualFold(u.Scheme, drive[:1]) { // a path with a drive letter + u.Scheme = fileScheme + u.Host = "" + u.Path = strings.Join([]string{drive, u.Opaque, u.Path}, `/`) // reconstruct the full path component (no fragment, no query) + } else if u.Host == "" && strings.HasPrefix(u.Path, drive) { // a path with a \\host volume + // NOTE: the special host@port syntax for UNC is not supported (yet) + u.Scheme = fileScheme + + // this is a modified version of filepath.Dir() to apply on the VolumeName itself + i := len(drive) - 1 + for i >= 0 && !os.IsPathSeparator(drive[i]) { + i-- + } + host := drive[:i] // \\host\share => host + + u.Path = strings.TrimPrefix(u.Path, host) + u.Host = strings.TrimPrefix(host, `\\`) + } + + u.Opaque = "" + u.Path = strings.ReplaceAll(strings.ToLower(u.Path), `\`, `/`) + + // ensure we form an absolute path + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + + u.Path = path.Clean(u.Path) + + return + } + + if u.Scheme == fileScheme { + // Handle dodgy cases for file://{...} URIs on windows. + // A canonical URI should always be followed by an absolute path. + // + // Examples: + // * file:///folder/file => valid, unchanged + // * file:///c:\folder\file => slashed + // * file:///./folder/file => valid, cleaned to remove the dot + // * file:///.\folder\file => remapped to cwd + // * file:///. => dodgy, remapped to / (consistent with the behavior on unix) + // * file:///.. 
=> dodgy, remapped to / (consistent with the behavior on unix) + if (!path.IsAbs(u.Path) && !filepath.IsAbs(u.Path)) || (strings.HasPrefix(u.Path, `/.`) && strings.Contains(u.Path, `\`)) { + // ensure we form an absolute path + u.Path, _ = filepath.Abs(strings.TrimLeft(u.Path, `/`)) + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + } + u.Path = strings.ToLower(u.Path) + } + + // NOTE: lower case normalization does not propagate to inner resources, + // generated when rebasing: when joining a relative URI with a file to an absolute base, + // only the base is currently lower-cased. + // + // For now, we assume this is good enough for most use cases + // and try not to generate too many differences + // between the output produced on different platforms. + u.Path = path.Clean(strings.ReplaceAll(u.Path, `\`, `/`)) +} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go new file mode 100644 index 00000000000..bbf8c757372 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -0,0 +1,392 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "sort" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +func init() { + gob.Register(map[string]any{}) + gob.Register([]any{}) +} + +// OperationProps describes an operation +// +// NOTES: +// - schemes, when present must be from [http, https, ws, wss]: see validate +// - Security is handled as a special case: see MarshalJSON function +type OperationProps struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitempty"` +} + +// MarshalJSON takes care of serializing operation properties to JSON +// +// We use a custom marshaller here to handle a special case related to +// the Security field. We need to preserve zero length slice +// while omitting the field when the value is nil/unset. +func (op OperationProps) MarshalJSON() ([]byte, error) { + type Alias OperationProps + if op.Security == nil { + return json.Marshal(&struct { + *Alias + + Security []map[string][]string `json:"security,omitempty"` + }{ + Alias: (*Alias)(&op), + Security: op.Security, + }) + } + + return json.Marshal(&struct { + *Alias + + Security []map[string][]string `json:"security"` + }{ + Alias: (*Alias)(&op), + Security: op.Security, + }) +} + +// Operation describes a single API operation on a path. +// +// For more information: http://goo.gl/8us55a#operationObject +type Operation struct { + VendorExtensible + OperationProps +}
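+ +// Example (sketch): the marshaller above distinguishes nil Security (the key +// is omitted) from an empty, non-nil slice (rendered as "security": []), which +// is how an operation opts out of spec-wide security requirements. The name +// exampleEmptySecurity is illustrative only. +func exampleEmptySecurity() *Operation { + op := NewOperation("updateThing") + op.Security = []map[string][]string{} // marshals as "security": [] + return op +} + +// NewOperation creates a new operation instance. +// It expects an ID as parameter but not passing an ID is also valid. 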
+func NewOperation(id string) *Operation { + op := new(Operation) + op.ID = id + return op +} + +// SuccessResponse gets a success response model +func (o *Operation) SuccessResponse() (*Response, int, bool) { + if o.Responses == nil { + return nil, 0, false + } + + responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses)) + for k := range o.Responses.StatusCodeResponses { + if k >= 200 && k < 300 { + responseCodes = append(responseCodes, k) + } + } + if len(responseCodes) > 0 { + sort.Ints(responseCodes) + v := o.Responses.StatusCodeResponses[responseCodes[0]] + return &v, responseCodes[0], true + } + + return o.Responses.Default, 0, false +} + +// JSONLookup look up a value by the json property name +func (o Operation) JSONLookup(token string) (any, error) { + if ex, ok := o.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(o.OperationProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (o *Operation) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &o.OperationProps); err != nil { + return err + } + return json.Unmarshal(data, &o.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (o Operation) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(o.OperationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(o.VendorExtensible) + if err != nil { + return nil, err + } + concated := jsonutils.ConcatJSON(b1, b2) + return concated, nil +} + +// WithID sets the ID property on this operation, allows for chaining. +func (o *Operation) WithID(id string) *Operation { + o.ID = id + return o +} + +// WithDescription sets the description on this operation, allows for chaining +func (o *Operation) WithDescription(description string) *Operation { + o.Description = description + return o +} + +// WithSummary sets the summary on this operation, allows for chaining +func (o *Operation) WithSummary(summary string) *Operation { + o.Summary = summary + return o +} + +// WithExternalDocs sets/removes the external docs for/from this operation. +// When you pass empty strings as params, the external documents will be removed. +// When you pass a non-empty string as one value, those values will be used on the external docs object; +// so when you pass a non-empty description, you should also pass the url and vice versa. +func (o *Operation) WithExternalDocs(description, url string) *Operation { + if description == "" && url == "" { + o.ExternalDocs = nil + return o + } + + if o.ExternalDocs == nil { + o.ExternalDocs = &ExternalDocumentation{} + } + o.ExternalDocs.Description = description + o.ExternalDocs.URL = url + return o +} + +// Deprecate marks the operation as deprecated +func (o *Operation) Deprecate() *Operation { + o.Deprecated = true + return o +} + +// Undeprecate marks the operation as not deprecated +func (o *Operation) Undeprecate() *Operation { + o.Deprecated = false + return o +} + +// WithConsumes adds media types for incoming body values +func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { + o.Consumes = append(o.Consumes, mediaTypes...) + return o +} + +// WithProduces adds media types for outgoing body values +func (o *Operation) WithProduces(mediaTypes ...string) *Operation { + o.Produces = append(o.Produces, mediaTypes...) + return o +}
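+ +// Example (sketch): a minimal operation assembled with the fluent API; names +// and status code are illustrative. +func exampleListOperation() *Operation { + return NewOperation("listThings"). + WithSummary("List things"). + WithProduces("application/json"). + RespondsWith(200, NewResponse().WithDescription("OK")) +} + +// WithTags adds tags for this operation +func (o *Operation) WithTags(tags ...string) *Operation { + o.Tags = append(o.Tags, tags...)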
+ return o +} + +// AddParam adds a parameter to this operation; when a parameter for that location +// and with that name already exists it will be replaced +func (o *Operation) AddParam(param *Parameter) *Operation { + if param == nil { + return o + } + + for i, p := range o.Parameters { + if p.Name == param.Name && p.In == param.In { + params := make([]Parameter, 0, len(o.Parameters)+1) + params = append(params, o.Parameters[:i]...) + params = append(params, *param) + params = append(params, o.Parameters[i+1:]...) + o.Parameters = params + + return o + } + } + + o.Parameters = append(o.Parameters, *param) + return o +} + +// RemoveParam removes a parameter from the operation +func (o *Operation) RemoveParam(name, in string) *Operation { + for i, p := range o.Parameters { + if p.Name == name && p.In == in { + o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) + return o + } + } + return o +} + +// SecuredWith adds a security scope to this operation. +func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { + o.Security = append(o.Security, map[string][]string{name: scopes}) + return o +} + +// WithDefaultResponse adds a default response to the operation. +// Passing a nil value will remove the response +func (o *Operation) WithDefaultResponse(response *Response) *Operation { + return o.RespondsWith(0, response) +} + +// RespondsWith adds a status code response to the operation. +// When the code is 0, the value of the response will be used as the default response value. +// When the value of the response is nil, it will be removed from the operation +func (o *Operation) RespondsWith(code int, response *Response) *Operation { + if o.Responses == nil { + o.Responses = new(Responses) + } + if code == 0 { + o.Responses.Default = response + return o + } + if response == nil { + delete(o.Responses.StatusCodeResponses, code) + return o + } + if o.Responses.StatusCodeResponses == nil { + o.Responses.StatusCodeResponses = make(map[int]Response) + } + o.Responses.StatusCodeResponses[code] = *response + return o +} + +type opsAlias OperationProps + +type gobAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *opsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (o Operation) GobEncode() ([]byte, error) { + raw := struct { + Ext VendorExtensible + Props OperationProps + }{ + Ext: o.VendorExtensible, + Props: o.OperationProps, + } + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (o *Operation) GobDecode(b []byte) error { + var raw struct { + Ext VendorExtensible + Props OperationProps + } + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + o.VendorExtensible = raw.Ext + o.OperationProps = raw.Props + return nil +} + +// GobEncode provides a safe gob encoder for OperationProps, including empty security requirements +func (op OperationProps) GobEncode() ([]byte, error) { + raw := gobAlias{ + Alias: (*opsAlias)(&op), + } + + var b bytes.Buffer + if op.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(op.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + 
raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(op.Security)) + for _, req := range op.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for OperationProps, including empty security requirements +func (op *OperationProps) GobDecode(b []byte) error { + var raw gobAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *op = *(*OperationProps)(raw.Alias) + return nil +} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go new file mode 100644 index 00000000000..b94b7682ac8 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -0,0 +1,315 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +// QueryParam creates a query parameter +func QueryParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} +} + +// HeaderParam creates a header parameter, this is always required by default +func HeaderParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} +} + +// PathParam creates a path parameter, this is always required +func PathParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} +} + +// BodyParam creates a body parameter +func BodyParam(name string, schema *Schema) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}} +} + +// FormDataParam creates a form data parameter +func FormDataParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} +} + +// FileParam creates a file parameter +func FileParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}} +} + +// SimpleArrayParam creates a param for a simple array (string, int, date etc) +func SimpleArrayParam(name, tpe, fmt string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}}} +} + +// ParamRef creates a parameter that's a json reference +func ParamRef(uri string) *Parameter { + p := new(Parameter) + p.Ref = MustCreateRef(uri) + return p +}
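+ +// Example (sketch): typical constructor usage; parameter names and formats are +// illustrative only. +func exampleUserParams() []*Parameter { + return []*Parameter{ + PathParam("userId").Typed("string", "uuid"), + QueryParam("limit").Typed("integer", "int32").WithDefault(20), + } +} + +// ParamProps describes the specific attributes of an operation 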
parameter +// +// NOTE: +// - Schema is defined when "in" == "body": see validate +// - AllowEmptyValue is allowed where "in" == "query" || "formData" +type ParamProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitempty"` + Schema *Schema `json:"schema,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` +} + +// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). +// +// There are five possible parameter types. +// - Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// - Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// - Header - Custom headers that are expected as part of the request. +// - Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// - Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// - `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// - `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
+// +// For more information: http://goo.gl/8us55a#parameterObject +type Parameter struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible + ParamProps +} + +// JSONLookup look up a value by the json property name +func (p Parameter) JSONLookup(token string) (any, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.ParamProps, token) + return r, err +} + +// WithDescription a fluent builder method for the description of the parameter +func (p *Parameter) WithDescription(description string) *Parameter { + p.Description = description + return p +} + +// Named a fluent builder method to override the name of the parameter +func (p *Parameter) Named(name string) *Parameter { + p.Name = name + return p +} + +// WithLocation a fluent builder method to override the location of the parameter +func (p *Parameter) WithLocation(in string) *Parameter { + p.In = in + return p +} + +// Typed a fluent builder method for the type of the parameter value +func (p *Parameter) Typed(tpe, format string) *Parameter { + p.Type = tpe + p.Format = format + return p +} + +// CollectionOf a fluent builder method for an array parameter +func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { + p.Type = jsonArray + p.Items = items + p.CollectionFormat = format + return p +} + +// WithDefault sets the default value on this parameter +func (p *Parameter) WithDefault(defaultValue any) *Parameter { + p.AsOptional() // with default implies optional + p.Default = defaultValue + return p +} + +// AllowsEmptyValues flags this parameter as being ok with empty values +func (p *Parameter) AllowsEmptyValues() *Parameter { + p.AllowEmptyValue = true + return p +} + +// NoEmptyValues flags this parameter as not liking empty values +func (p *Parameter) NoEmptyValues() *Parameter { + p.AllowEmptyValue = false + return p +} + +// AsOptional flags this parameter as optional +func (p *Parameter) AsOptional() *Parameter { + p.Required = false + return p +} + +// AsRequired flags this parameter as required +func (p *Parameter) AsRequired() *Parameter { + if p.Default != nil { // with a default required makes no sense + return p + } + p.Required = true + return p +} + +// WithMaxLength sets a max length value +func (p *Parameter) WithMaxLength(maximum int64) *Parameter { + p.MaxLength = &maximum + return p +} + +// WithMinLength sets a min length value +func (p *Parameter) WithMinLength(minimum int64) *Parameter { + p.MinLength = &minimum + return p +} + +// WithPattern sets a pattern value +func (p *Parameter) WithPattern(pattern string) *Parameter { + p.Pattern = pattern + return p +} + +// WithMultipleOf sets a multiple of value +func (p *Parameter) WithMultipleOf(number float64) *Parameter { + p.MultipleOf = &number + return p +} + +// WithMaximum sets a maximum number value +func (p *Parameter) WithMaximum(maximum float64, exclusive bool) *Parameter { + p.Maximum = &maximum + p.ExclusiveMaximum = exclusive + return p +} + +// WithMinimum sets a minimum number value +func (p *Parameter) 
WithMinimum(minimum float64, exclusive bool) *Parameter { + p.Minimum = &minimum + p.ExclusiveMinimum = exclusive + return p +} + +// WithEnum sets the enum values (replace) +func (p *Parameter) WithEnum(values ...any) *Parameter { + p.Enum = append([]any{}, values...) + return p +} + +// WithMaxItems sets the max items +func (p *Parameter) WithMaxItems(size int64) *Parameter { + p.MaxItems = &size + return p +} + +// WithMinItems sets the min items +func (p *Parameter) WithMinItems(size int64) *Parameter { + p.MinItems = &size + return p +} + +// UniqueValues dictates that this array can only have unique items +func (p *Parameter) UniqueValues() *Parameter { + p.UniqueItems = true + return p +} + +// AllowDuplicates this array can have duplicates +func (p *Parameter) AllowDuplicates() *Parameter { + p.UniqueItems = false + return p +} + +// WithValidations is a fluent method to set parameter validations +func (p *Parameter) WithValidations(val CommonValidations) *Parameter { + p.SetValidations(SchemaValidations{CommonValidations: val}) + return p +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Parameter) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.ParamProps) +} + +// MarshalJSON converts this items object to JSON +func (p Parameter) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.ParamProps) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b3, b1, b2, b4, b5), nil +} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go new file mode 100644 index 00000000000..c692b89e46c --- /dev/null +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -0,0 +1,76 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +// PathItemProps the path item specific properties +type PathItemProps struct { + Get *Operation `json:"get,omitempty"` + Put *Operation `json:"put,omitempty"` + Post *Operation `json:"post,omitempty"` + Delete *Operation `json:"delete,omitempty"` + Options *Operation `json:"options,omitempty"` + Head *Operation `json:"head,omitempty"` + Patch *Operation `json:"patch,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` +} + +// PathItem describes the operations available on a single path. +// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). +// The path itself is still exposed to the documentation viewer but they will +// not know which operations and parameters are available. 
+// +// For more information: http://goo.gl/8us55a#pathItemObject +type PathItem struct { + Refable + VendorExtensible + PathItemProps +} + +// JSONLookup look up a value by the json property name +func (p PathItem) JSONLookup(token string) (any, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *PathItem) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.PathItemProps) +} + +// MarshalJSON converts this items object to JSON +func (p PathItem) MarshalJSON() ([]byte, error) { + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.PathItemProps) + if err != nil { + return nil, err + } + concated := jsonutils.ConcatJSON(b3, b4, b5) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go new file mode 100644 index 00000000000..b9e42184b19 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/paths.go @@ -0,0 +1,87 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/swag/jsonutils" +) + +// Paths holds the relative paths to the individual endpoints. +// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order +// to construct the full URL. +// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
+// +// For more information: http://goo.gl/8us55a#pathsObject +type Paths struct { + VendorExtensible + + Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" +} + +// JSONLookup look up a value by the json property name +func (p Paths) JSONLookup(token string) (any, error) { + if pi, ok := p.Paths[token]; ok { + return &pi, nil + } + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + return nil, fmt.Errorf("object has no field %q: %w", token, ErrSpec) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Paths) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + for k, v := range res { + if strings.HasPrefix(strings.ToLower(k), "x-") { + if p.Extensions == nil { + p.Extensions = make(map[string]any) + } + var d any + if err := json.Unmarshal(v, &d); err != nil { + return err + } + p.Extensions[k] = d + } + if strings.HasPrefix(k, "/") { + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + var pi PathItem + if err := json.Unmarshal(v, &pi); err != nil { + return err + } + p.Paths[k] = pi + } + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p Paths) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + + pths := make(map[string]PathItem) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) + if err != nil { + return nil, err + } + concated := jsonutils.ConcatJSON(b1, b2) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go new file mode 100644 index 00000000000..4142308dd39 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/properties.go @@ -0,0 +1,95 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "bytes" + "encoding/json" + "reflect" + "sort" +) + +// OrderSchemaItem holds a named schema (e.g. from a property of an object) +type OrderSchemaItem struct { + Schema + + Name string +} + +// OrderSchemaItems is a sortable slice of named schemas. +// The ordering is defined by the x-order schema extension. +type OrderSchemaItems []OrderSchemaItem
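+ +// Sketch (illustrative): given properties unmarshaled from +// {"b": {"x-order": 0}, "a": {"x-order": 1}}, the marshaled output keeps "b" +// before "a"; properties without x-order sort after the ordered ones, +// alphabetically by name. + +// MarshalJSON produces a json object with keys defined by the names of the schemas +// in the OrderSchemaItems slice, keeping the original order of the slice. 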
+func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(nil) + buf.WriteString("{") + for i := range items { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString("\"") + buf.WriteString(items[i].Name) + buf.WriteString("\":") + bs, err := json.Marshal(&items[i].Schema) + if err != nil { + return nil, err + } + buf.Write(bs) + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +func (items OrderSchemaItems) Len() int { return len(items) } +func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } +func (items OrderSchemaItems) Less(i, j int) (ret bool) { + ii, oki := items[i].Extensions.GetInt("x-order") + ij, okj := items[j].Extensions.GetInt("x-order") + if oki { + if okj { + defer func() { + if err := recover(); err != nil { + defer func() { + if err = recover(); err != nil { + ret = items[i].Name < items[j].Name + } + }() + ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() + } + }() + return ii < ij + } + return true + } else if okj { + return false + } + return items[i].Name < items[j].Name +} + +// SchemaProperties is a map representing the properties of a Schema object. +// It knows how to transform its keys into an ordered slice. +type SchemaProperties map[string]Schema + +// ToOrderedSchemaItems transforms the map of properties into a sortable slice +func (properties SchemaProperties) ToOrderedSchemaItems() OrderSchemaItems { + items := make(OrderSchemaItems, 0, len(properties)) + for k, v := range properties { + items = append(items, OrderSchemaItem{ + Name: k, + Schema: v, + }) + } + sort.Sort(items) + return items +} + +// MarshalJSON produces properties as json, keeping their order. +func (properties SchemaProperties) MarshalJSON() ([]byte, error) { + if properties == nil { + return []byte("null"), nil + } + return json.Marshal(properties.ToOrderedSchemaItems()) +} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go new file mode 100644 index 00000000000..c9279262942 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -0,0 +1,184 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "net/http" + "os" + "path/filepath" + + "github.com/go-openapi/jsonreference" +) + +// Refable is a struct for things that accept a $ref property +type Refable struct { + Ref Ref +} + +// MarshalJSON marshals the ref to json +func (r Refable) MarshalJSON() ([]byte, error) { + return r.Ref.MarshalJSON() +} + +// UnmarshalJSON unmarshals the ref from json +func (r *Refable) UnmarshalJSON(d []byte) error { + return json.Unmarshal(d, &r.Ref) +} + +// Ref represents a json reference that is potentially resolved +type Ref struct { + jsonreference.Ref +} + +// NewRef creates a new instance of a ref object +// returns an error when the reference uri is an invalid uri +func NewRef(refURI string) (Ref, error) { + ref, err := jsonreference.New(refURI) + if err != nil { + return Ref{}, err + } + + return Ref{Ref: ref}, nil +}
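+ +// Example (sketch): a fragment-only $ref into the current document; the +// pointer target is illustrative. +func exampleUserRef() Ref { + r := MustCreateRef("#/definitions/User") + // r.HasFragmentOnly is true and r.RemoteURI() returns "". + return r +} + +// MustCreateRef creates a ref object but panics when refURI is invalid. +// Use the NewRef method for a version that returns an error. 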
+func MustCreateRef(refURI string) Ref { + return Ref{Ref: jsonreference.MustCreateRef(refURI)} +} + +// RemoteURI gets the remote uri part of the ref +func (r *Ref) RemoteURI() string { + if r.String() == "" { + return "" + } + + u := *r.GetURL() + u.Fragment = "" + return u.String() +} + +// IsValidURI returns true when the url the ref points to can be found +func (r *Ref) IsValidURI(basepaths ...string) bool { + if r.String() == "" { + return true + } + + v := r.RemoteURI() + if v == "" { + return true + } + + if r.HasFullURL { + //nolint:noctx,gosec + rr, err := http.Get(v) + if err != nil { + return false + } + defer rr.Body.Close() + + // true if the response is >= 200 and < 300 + return rr.StatusCode/100 == 2 //nolint:mnd + } + + if !r.HasFileScheme && !r.HasFullFilePath && !r.HasURLPathOnly { + return false + } + + // check for local file + pth := v + if r.HasURLPathOnly { + base := "." + if len(basepaths) > 0 { + base = filepath.Dir(filepath.Join(basepaths...)) + } + p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) + if e != nil { + return false + } + pth = p + } + + fi, err := os.Stat(filepath.ToSlash(pth)) + if err != nil { + return false + } + + return !fi.IsDir() +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + ref, err := r.Ref.Inherits(child.Ref) + if err != nil { + return nil, err + } + return &Ref{Ref: *ref}, nil +} + +// MarshalJSON marshals this ref into a JSON object +func (r Ref) MarshalJSON() ([]byte, error) { + str := r.String() + if str == "" { + if r.IsRoot() { + return []byte(`{"$ref":""}`), nil + } + return []byte("{}"), nil + } + v := map[string]any{"$ref": str} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshals this ref from a JSON object +func (r *Ref) UnmarshalJSON(d []byte) error { + var v map[string]any + if err := json.Unmarshal(d, &v); err != nil { + return err + } + return r.fromMap(v) +} + +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} + +func (r *Ref) fromMap(v map[string]any) error { + if v == nil { + return nil + } + + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *r = Ref{Ref: ref} + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/resolver.go b/vendor/github.com/go-openapi/spec/resolver.go new file mode 100644 index 00000000000..b82c1821332 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/resolver.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "fmt" + + "github.com/go-openapi/swag/jsonutils" +) + +func resolveAnyWithBase(root any, ref *Ref, result any, options *ExpandOptions) error { + options = optionsOrDefault(options) + resolver := defaultSchemaLoader(root, options, nil, nil) + + if err := resolver.Resolve(ref, result, options.RelativeBase); err != nil { + return err + } + + return nil +} + +// 
ResolveRefWithBase resolves a reference against a context root with preservation of base path
+func ResolveRefWithBase(root any, ref *Ref, options *ExpandOptions) (*Schema, error) {
+	result := new(Schema)
+
+	if err := resolveAnyWithBase(root, ref, result, options); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolveRef resolves a reference for a schema against a context root
+// ref is guaranteed to be in root (no need to go to external files)
+//
+// ResolveRef is ONLY called from the code generation module
+func ResolveRef(root any, ref *Ref) (*Schema, error) {
+	res, _, err := ref.GetPointer().Get(root)
+	if err != nil {
+		return nil, err
+	}
+
+	switch sch := res.(type) {
+	case Schema:
+		return &sch, nil
+	case *Schema:
+		return sch, nil
+	case map[string]any:
+		newSch := new(Schema)
+		if err = jsonutils.FromDynamicJSON(sch, newSch); err != nil {
+			return nil, err
+		}
+		return newSch, nil
+	default:
+		return nil, fmt.Errorf("type: %T: %w", sch, ErrUnknownTypeForReference)
+	}
+}
+
+// ResolveParameterWithBase resolves a parameter reference against a context root and base path
+func ResolveParameterWithBase(root any, ref Ref, options *ExpandOptions) (*Parameter, error) {
+	result := new(Parameter)
+
+	if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolveParameter resolves a parameter reference against a context root
+func ResolveParameter(root any, ref Ref) (*Parameter, error) {
+	return ResolveParameterWithBase(root, ref, nil)
+}
+
+// ResolveResponseWithBase resolves a response reference against a context root and base path
+func ResolveResponseWithBase(root any, ref Ref, options *ExpandOptions) (*Response, error) {
+	result := new(Response)
+
+	err := resolveAnyWithBase(root, &ref, result, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolveResponse resolves a response reference against a context root
+func ResolveResponse(root any, ref Ref) (*Response, error) {
+	return ResolveResponseWithBase(root, ref, nil)
+}
+
+// ResolvePathItemWithBase resolves a path item reference against a context root and base path
+func ResolvePathItemWithBase(root any, ref Ref, options *ExpandOptions) (*PathItem, error) {
+	result := new(PathItem)
+
+	if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolvePathItem resolves a path item reference against a context root and base path
+//
+// Deprecated: use ResolvePathItemWithBase instead
func ResolvePathItem(root any, ref Ref, options *ExpandOptions) (*PathItem, error) {
+	return ResolvePathItemWithBase(root, ref, options)
+}
+
+// ResolveItemsWithBase resolves a parameter items reference against a context root and base path.
+//
+// NOTE: strictly speaking, this construct is not supported by Swagger 2.0.
+// Similarly, $ref are forbidden in response headers.
+func ResolveItemsWithBase(root any, ref Ref, options *ExpandOptions) (*Items, error) {
+	result := new(Items)
+
+	if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolveItems resolves a parameter items reference against a context root and base path.
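+//
+// A hedged usage sketch (rootDoc and ref are assumed to exist; nil options fall
+// back to defaults):
+//
+//	items, err := ResolveItemsWithBase(rootDoc, ref, nil)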
+//
+// Deprecated: use ResolveItemsWithBase instead
+func ResolveItems(root any, ref Ref, options *ExpandOptions) (*Items, error) {
+	return ResolveItemsWithBase(root, ref, options)
+}
diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go
new file mode 100644
index 00000000000..e5a7e5c40d4
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/response.go
@@ -0,0 +1,141 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package spec
+
+import (
+	"encoding/json"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag/jsonutils"
+)
+
+// ResponseProps properties specific to a response
+type ResponseProps struct {
+	Description string            `json:"description"`
+	Schema      *Schema           `json:"schema,omitempty"`
+	Headers     map[string]Header `json:"headers,omitempty"`
+	Examples    map[string]any    `json:"examples,omitempty"`
+}
+
+// Response describes a single response from an API Operation.
+//
+// For more information: http://goo.gl/8us55a#responseObject
+type Response struct {
+	Refable
+	ResponseProps
+	VendorExtensible
+}
+
+// NewResponse creates a new response instance
+func NewResponse() *Response {
+	return new(Response)
+}
+
+// ResponseRef creates a response as a json reference
+func ResponseRef(url string) *Response {
+	resp := NewResponse()
+	resp.Ref = MustCreateRef(url)
+	return resp
+}
+
+// JSONLookup looks up a value by the json property name
+func (r Response) JSONLookup(token string) (any, error) {
+	if ex, ok := r.Extensions[token]; ok {
+		return &ex, nil
+	}
+	if token == "$ref" {
+		return &r.Ref, nil
+	}
+	ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token)
+	return ptr, err
+}
+
+// UnmarshalJSON hydrates this response instance with the data from JSON
+func (r *Response) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &r.ResponseProps); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &r.Refable); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &r.VendorExtensible)
+}
+
+// MarshalJSON converts this response object to JSON
+func (r Response) MarshalJSON() ([]byte, error) {
+	var (
+		b1  []byte
+		err error
+	)
+
+	if r.Ref.String() == "" {
+		// when there is no $ref, an empty description is rendered as an empty string
+		b1, err = json.Marshal(r.ResponseProps)
+	} else {
+		// when there is a $ref, the description should be omitempty-ied
+		b1, err = json.Marshal(struct {
+			Description string            `json:"description,omitempty"`
+			Schema      *Schema           `json:"schema,omitempty"`
+			Headers     map[string]Header `json:"headers,omitempty"`
+			Examples    map[string]any    `json:"examples,omitempty"`
+		}{
+			Description: r.Description,
+			Schema:      r.Schema,
+			Examples:    r.Examples,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	b2, err := json.Marshal(r.Refable)
+	if err != nil {
+		return nil, err
+	}
+	b3, err := json.Marshal(r.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	return jsonutils.ConcatJSON(b1, b2, b3), nil
+}
+
+// WithDescription sets the description on this response, allows for chaining
+func (r *Response) WithDescription(description string) *Response {
+	r.Description = description
+	return r
+}
+
+// WithSchema sets the schema on this response, allows for chaining.
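+//
+// For instance, a hedged sketch of chaining the fluent setters:
+//
+//	resp := NewResponse().
+//		WithDescription("OK").
+//		WithSchema(StringProperty())
+//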
+// Passing a nil argument removes the schema from this response
+func (r *Response) WithSchema(schema *Schema) *Response {
+	r.Schema = schema
+	return r
+}
+
+// AddHeader adds a header to this response
+func (r *Response) AddHeader(name string, header *Header) *Response {
+	if header == nil {
+		return r.RemoveHeader(name)
+	}
+	if r.Headers == nil {
+		r.Headers = make(map[string]Header)
+	}
+	r.Headers[name] = *header
+	return r
+}
+
+// RemoveHeader removes a header from this response
+func (r *Response) RemoveHeader(name string) *Response {
+	delete(r.Headers, name)
+	return r
+}
+
+// AddExample adds an example to this response
+func (r *Response) AddExample(mediaType string, example any) *Response {
+	if r.Examples == nil {
+		r.Examples = make(map[string]any)
+	}
+	r.Examples[mediaType] = example
+	return r
+}
diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go
new file mode 100644
index 00000000000..733a1315d02
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/responses.go
@@ -0,0 +1,129 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"github.com/go-openapi/swag/jsonutils"
+)
+
+// Responses is a container for the expected responses of an operation.
+// The container maps an HTTP response code to the expected response.
+// The documentation is not necessarily expected to cover all possible HTTP response codes,
+// since they may not be known in advance. However, it is expected to cover
+// a successful operation response and any known errors.
+//
+// The `default` can be used as a default response object for all HTTP codes that are not covered
+// individually by the specification.
+//
+// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response
+// for a successful operation call.
+//
+// For more information: http://goo.gl/8us55a#responsesObject
+type Responses struct {
+	VendorExtensible
+	ResponsesProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (r Responses) JSONLookup(token string) (any, error) {
+	if token == "default" {
+		return r.Default, nil
+	}
+	if ex, ok := r.Extensions[token]; ok {
+		return &ex, nil
+	}
+	if i, err := strconv.Atoi(token); err == nil {
+		if scr, ok := r.StatusCodeResponses[i]; ok {
+			return scr, nil
+		}
+	}
+	return nil, fmt.Errorf("object has no field %q: %w", token, ErrSpec)
+}
+
+// UnmarshalJSON hydrates this responses instance with the data from JSON
+func (r *Responses) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &r.ResponsesProps); err != nil {
+		return err
+	}
+
+	if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
+		return err
+	}
+	if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) {
+		r.ResponsesProps = ResponsesProps{}
+	}
+	return nil
+}
+
+// MarshalJSON converts this responses object to JSON
+func (r Responses) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(r.ResponsesProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(r.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	concated := jsonutils.ConcatJSON(b1, b2)
+	return concated, nil
+}
+
+// ResponsesProps describes all responses for an operation.
+// It holds the default response and maps each HTTP status code to a response.
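+//
+// For example, a sketch of the JSON shape this struct models:
+//
+//	"responses": {
+//	  "200": { "description": "OK" },
+//	  "default": { "description": "unexpected error" }
+//	}
+//
+// Here "200" populates StatusCodeResponses[200] and "default" populates Default.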
+type ResponsesProps struct { + Default *Response + StatusCodeResponses map[int]Response +} + +// MarshalJSON marshals responses as JSON +func (r ResponsesProps) MarshalJSON() ([]byte, error) { + toser := map[string]Response{} + if r.Default != nil { + toser["default"] = *r.Default + } + for k, v := range r.StatusCodeResponses { + toser[strconv.Itoa(k)] = v + } + return json.Marshal(toser) +} + +// UnmarshalJSON unmarshals responses from JSON +func (r *ResponsesProps) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + if v, ok := res["default"]; ok { + var defaultRes Response + if err := json.Unmarshal(v, &defaultRes); err != nil { + return err + } + r.Default = &defaultRes + delete(res, "default") + } + for k, v := range res { + if !strings.HasPrefix(k, "x-") { + var statusCodeResp Response + if err := json.Unmarshal(v, &statusCodeResp); err != nil { + return err + } + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = statusCodeResp + } + } + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go new file mode 100644 index 00000000000..6623728a41a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -0,0 +1,636 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonname" + "github.com/go-openapi/swag/jsonutils" +) + +// BooleanProperty creates a boolean property +func BooleanProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} +} + +// BoolProperty creates a boolean property +func BoolProperty() *Schema { return BooleanProperty() } + +// StringProperty creates a string property +func StringProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// CharProperty creates a string property +func CharProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// Float64Property creates a float64/double property +func Float64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} +} + +// Float32Property creates a float32/float property +func Float32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} +} + +// Int8Property creates an int8 property +func Int8Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} +} + +// Int16Property creates an int16 property +func Int16Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} +} + +// Int32Property creates an int32 property +func Int32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} +} + +// Int64Property creates an int64 property +func Int64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} +} + +// StrFmtProperty creates a property for the named string format +func StrFmtProperty(format string) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} +} + +// DateProperty creates a date property +func 
DateProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} +} + +// DateTimeProperty creates a date time property +func DateTimeProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} +} + +// MapProperty creates a map property +func MapProperty(property *Schema) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} +} + +// RefProperty creates a ref property +func RefProperty(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// RefSchema creates a ref property +func RefSchema(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// ArrayProperty creates an array property +func ArrayProperty(items *Schema) *Schema { + if items == nil { + return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} + } + return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} +} + +// ComposedSchema creates a schema with allOf +func ComposedSchema(schemas ...Schema) *Schema { + s := new(Schema) + s.AllOf = schemas + return s +} + +// SchemaURL represents a schema url +type SchemaURL string + +// MarshalJSON marshal this to JSON +func (r SchemaURL) MarshalJSON() ([]byte, error) { + if r == "" { + return []byte("{}"), nil + } + v := map[string]any{"$schema": string(r)} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshal this from JSON +func (r *SchemaURL) UnmarshalJSON(data []byte) error { + var v map[string]any + if err := json.Unmarshal(data, &v); err != nil { + return err + } + return r.fromMap(v) +} + +func (r *SchemaURL) fromMap(v map[string]any) error { + if v == nil { + return nil + } + if vv, ok := v["$schema"]; ok { + if str, ok := vv.(string); ok { + u, err := parseURL(str) + if err != nil { + return err + } + + *r = SchemaURL(u.String()) + } + } + return nil +} + +// SchemaProps describes a JSON schema (draft 4) +type SchemaProps struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default any `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []any `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitempty"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitempty"` + Properties SchemaProperties `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` + 
PatternProperties SchemaProperties `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` + Definitions Definitions `json:"definitions,omitempty"` +} + +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) +type SwaggerSchemaProps struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitempty"` + XML *XMLObject `json:"xml,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + Example any `json:"example,omitempty"` +} + +// Schema the schema object allows the definition of input and output data types. +// These types can be objects, but also primitives and arrays. +// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) +// and uses a predefined subset of it. +// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. +// +// For more information: http://goo.gl/8us55a#schemaObject +type Schema struct { + VendorExtensible + SchemaProps + SwaggerSchemaProps + + ExtraProps map[string]any `json:"-"` +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s Schema) JSONLookup(token string) (any, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + if ex, ok := s.ExtraProps[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) + if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { + return r, err + } + r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) + return r, err +} + +// WithID sets the id for this schema, allows for chaining +func (s *Schema) WithID(id string) *Schema { + s.ID = id + return s +} + +// WithTitle sets the title for this schema, allows for chaining +func (s *Schema) WithTitle(title string) *Schema { + s.Title = title + return s +} + +// WithDescription sets the description for this schema, allows for chaining +func (s *Schema) WithDescription(description string) *Schema { + s.Description = description + return s +} + +// WithProperties sets the properties for this schema +func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { + s.Properties = schemas + return s +} + +// SetProperty sets a property on this schema +func (s *Schema) SetProperty(name string, schema Schema) *Schema { + if s.Properties == nil { + s.Properties = make(map[string]Schema) + } + s.Properties[name] = schema + return s +} + +// WithAllOf sets the all of property +func (s *Schema) WithAllOf(schemas ...Schema) *Schema { + s.AllOf = schemas + return s +} + +// WithMaxProperties sets the max number of properties an object can have +func (s *Schema) WithMaxProperties(maximum int64) *Schema { + s.MaxProperties = &maximum + return s +} + +// WithMinProperties sets the min number of properties an object must have +func (s *Schema) WithMinProperties(minimum int64) *Schema { + s.MinProperties = &minimum + return s +} + +// Typed sets the type of this schema for a single value item +func (s *Schema) Typed(tpe, format string) *Schema { + s.Type = []string{tpe} + s.Format = format + return s +} + +// AddType adds a type with potential format to the types for this schema +func (s *Schema) AddType(tpe, format string) *Schema { + s.Type = append(s.Type, tpe) + if format != "" { + s.Format = format + } + return s +} + +// AsNullable flags 
this schema as nullable.
+func (s *Schema) AsNullable() *Schema {
+	s.Nullable = true
+	return s
+}
+
+// CollectionOf is a fluent builder method for an array schema
+func (s *Schema) CollectionOf(items Schema) *Schema {
+	s.Type = []string{jsonArray}
+	s.Items = &SchemaOrArray{Schema: &items}
+	return s
+}
+
+// WithDefault sets the default value on this schema
+func (s *Schema) WithDefault(defaultValue any) *Schema {
+	s.Default = defaultValue
+	return s
+}
+
+// WithRequired sets the required property names on this schema
+func (s *Schema) WithRequired(items ...string) *Schema {
+	s.Required = items
+	return s
+}
+
+// AddRequired adds field names to the required properties array
+func (s *Schema) AddRequired(items ...string) *Schema {
+	s.Required = append(s.Required, items...)
+	return s
+}
+
+// WithMaxLength sets a max length value
+func (s *Schema) WithMaxLength(maximum int64) *Schema {
+	s.MaxLength = &maximum
+	return s
+}
+
+// WithMinLength sets a min length value
+func (s *Schema) WithMinLength(minimum int64) *Schema {
+	s.MinLength = &minimum
+	return s
+}
+
+// WithPattern sets a pattern value
+func (s *Schema) WithPattern(pattern string) *Schema {
+	s.Pattern = pattern
+	return s
+}
+
+// WithMultipleOf sets a multiple of value
+func (s *Schema) WithMultipleOf(number float64) *Schema {
+	s.MultipleOf = &number
+	return s
+}
+
+// WithMaximum sets a maximum number value
+func (s *Schema) WithMaximum(maximum float64, exclusive bool) *Schema {
+	s.Maximum = &maximum
+	s.ExclusiveMaximum = exclusive
+	return s
+}
+
+// WithMinimum sets a minimum number value
+func (s *Schema) WithMinimum(minimum float64, exclusive bool) *Schema {
+	s.Minimum = &minimum
+	s.ExclusiveMinimum = exclusive
+	return s
+}
+
+// WithEnum sets the enum values (replacing any previous values)
+func (s *Schema) WithEnum(values ...any) *Schema {
+	s.Enum = append([]any{}, values...)
+	return s
+}
+
+// WithMaxItems sets the max items
+func (s *Schema) WithMaxItems(size int64) *Schema {
+	s.MaxItems = &size
+	return s
+}
+
+// WithMinItems sets the min items
+func (s *Schema) WithMinItems(size int64) *Schema {
+	s.MinItems = &size
+	return s
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (s *Schema) UniqueValues() *Schema {
+	s.UniqueItems = true
+	return s
+}
+
+// AllowDuplicates flags this array as allowing duplicate items
+func (s *Schema) AllowDuplicates() *Schema {
+	s.UniqueItems = false
+	return s
+}
+
+// AddToAllOf adds a schema to the allOf property
+func (s *Schema) AddToAllOf(schemas ...Schema) *Schema {
+	s.AllOf = append(s.AllOf, schemas...)
+	return s
+}
+
+// WithDiscriminator sets the name of the discriminator field
+func (s *Schema) WithDiscriminator(discriminator string) *Schema {
+	s.Discriminator = discriminator
+	return s
+}
+
+// AsReadOnly flags this schema as readonly
+func (s *Schema) AsReadOnly() *Schema {
+	s.ReadOnly = true
+	return s
+}
+
+// AsWritable flags this schema as writable (not read-only)
+func (s *Schema) AsWritable() *Schema {
+	s.ReadOnly = false
+	return s
+}
+
+// WithExample sets the example for this schema
+func (s *Schema) WithExample(example any) *Schema {
+	s.Example = example
+	return s
+}
+
+// WithExternalDocs sets/removes the external docs for/from this schema.
+// When you pass empty strings as params, the external documents will be removed.
+// When you pass non-empty strings, those values will be set on the external docs object;
+// so when you pass a non-empty description, you should also pass the url, and vice versa.
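+//
+// A small sketch (the values are made up):
+//
+//	s := new(Schema)
+//	s.WithExternalDocs("more details", "https://example.com/docs")
+//	s.WithExternalDocs("", "") // removes the external docs again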
+func (s *Schema) WithExternalDocs(description, url string) *Schema {
+	if description == "" && url == "" {
+		s.ExternalDocs = nil
+		return s
+	}
+
+	if s.ExternalDocs == nil {
+		s.ExternalDocs = &ExternalDocumentation{}
+	}
+	s.ExternalDocs.Description = description
+	s.ExternalDocs.URL = url
+	return s
+}
+
+// WithXMLName sets the xml name for the object
+func (s *Schema) WithXMLName(name string) *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Name = name
+	return s
+}
+
+// WithXMLNamespace sets the xml namespace for the object
+func (s *Schema) WithXMLNamespace(namespace string) *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Namespace = namespace
+	return s
+}
+
+// WithXMLPrefix sets the xml prefix for the object
+func (s *Schema) WithXMLPrefix(prefix string) *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Prefix = prefix
+	return s
+}
+
+// AsXMLAttribute flags this object as an xml attribute
+func (s *Schema) AsXMLAttribute() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Attribute = true
+	return s
+}
+
+// AsXMLElement flags this object as an xml node
+func (s *Schema) AsXMLElement() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Attribute = false
+	return s
+}
+
+// AsWrappedXML flags this object as wrapped; this is mostly useful for array types
+func (s *Schema) AsWrappedXML() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Wrapped = true
+	return s
+}
+
+// AsUnwrappedXML flags this object as not wrapped
+func (s *Schema) AsUnwrappedXML() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Wrapped = false
+	return s
+}
+
+// SetValidations defines all schema validations.
+//
+// NOTE: Required, ReadOnly, AllOf, AnyOf, OneOf and Not are not considered.
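+//
+// A hedged sketch (the validation values are arbitrary):
+//
+//	minLen := int64(1)
+//	s.SetValidations(SchemaValidations{
+//		CommonValidations: CommonValidations{MinLength: &minLen},
+//	})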
+func (s *Schema) SetValidations(val SchemaValidations) {
+	s.Maximum = val.Maximum
+	s.ExclusiveMaximum = val.ExclusiveMaximum
+	s.Minimum = val.Minimum
+	s.ExclusiveMinimum = val.ExclusiveMinimum
+	s.MaxLength = val.MaxLength
+	s.MinLength = val.MinLength
+	s.Pattern = val.Pattern
+	s.MaxItems = val.MaxItems
+	s.MinItems = val.MinItems
+	s.UniqueItems = val.UniqueItems
+	s.MultipleOf = val.MultipleOf
+	s.Enum = val.Enum
+	s.MinProperties = val.MinProperties
+	s.MaxProperties = val.MaxProperties
+	s.PatternProperties = val.PatternProperties
+}
+
+// WithValidations is a fluent method to set schema validations
+func (s *Schema) WithValidations(val SchemaValidations) *Schema {
+	s.SetValidations(val)
+	return s
+}
+
+// Validations returns a clone of the validations for this schema
+func (s Schema) Validations() SchemaValidations {
+	return SchemaValidations{
+		CommonValidations: CommonValidations{
+			Maximum:          s.Maximum,
+			ExclusiveMaximum: s.ExclusiveMaximum,
+			Minimum:          s.Minimum,
+			ExclusiveMinimum: s.ExclusiveMinimum,
+			MaxLength:        s.MaxLength,
+			MinLength:        s.MinLength,
+			Pattern:          s.Pattern,
+			MaxItems:         s.MaxItems,
+			MinItems:         s.MinItems,
+			UniqueItems:      s.UniqueItems,
+			MultipleOf:       s.MultipleOf,
+			Enum:             s.Enum,
+		},
+		MinProperties:     s.MinProperties,
+		MaxProperties:     s.MaxProperties,
+		PatternProperties: s.PatternProperties,
+	}
+}
+
+// MarshalJSON marshals this schema to JSON
+func (s Schema) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(s.SchemaProps)
+	if err != nil {
+		return nil, fmt.Errorf("schema props %v: %w", err, ErrSpec)
+	}
+	b2, err := json.Marshal(s.VendorExtensible)
+	if err != nil {
+		return nil, fmt.Errorf("vendor props %v: %w", err, ErrSpec)
+	}
+	b3, err := s.Ref.MarshalJSON()
+	if err != nil {
+		return nil, fmt.Errorf("ref prop %v: %w", err, ErrSpec)
+	}
+	b4, err := s.Schema.MarshalJSON()
+	if err != nil {
+		return nil, fmt.Errorf("schema prop %v: %w", err, ErrSpec)
+	}
+	b5, err := json.Marshal(s.SwaggerSchemaProps)
+	if err != nil {
+		return nil, fmt.Errorf("common validations %v: %w", err, ErrSpec)
+	}
+	var b6 []byte
+	if s.ExtraProps != nil {
+		jj, err := json.Marshal(s.ExtraProps)
+		if err != nil {
+			return nil, fmt.Errorf("extra props %v: %w", err, ErrSpec)
+		}
+		b6 = jj
+	}
+	return jsonutils.ConcatJSON(b1, b2, b3, b4, b5, b6), nil
+}
+
+// UnmarshalJSON unmarshals this schema from JSON
+func (s *Schema) UnmarshalJSON(data []byte) error {
+	props := struct {
+		SchemaProps
+		SwaggerSchemaProps
+	}{}
+	if err := json.Unmarshal(data, &props); err != nil {
+		return err
+	}
+
+	sch := Schema{
+		SchemaProps:        props.SchemaProps,
+		SwaggerSchemaProps: props.SwaggerSchemaProps,
+	}
+
+	var d map[string]any
+	if err := json.Unmarshal(data, &d); err != nil {
+		return err
+	}
+
+	_ = sch.Ref.fromMap(d)
+	_ = sch.Schema.fromMap(d)
+
+	delete(d, "$ref")
+	delete(d, "$schema")
+	for _, pn := range jsonname.DefaultJSONNameProvider.GetJSONNames(s) {
+		delete(d, pn)
+	}
+
+	for k, vv := range d {
+		lk := strings.ToLower(k)
+		if strings.HasPrefix(lk, "x-") {
+			if sch.Extensions == nil {
+				sch.Extensions = map[string]any{}
+			}
+			sch.Extensions[k] = vv
+			continue
+		}
+		if sch.ExtraProps == nil {
+			sch.ExtraProps = map[string]any{}
+		}
+		sch.ExtraProps[k] = vv
+	}
+
+	*s = sch
+
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go
new file mode 100644
index 00000000000..8d4a9853256
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schema_loader.go
@@ -0,0 +1,322 @@
+// SPDX-FileCopyrightText: Copyright
2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"reflect"
+	"strings"
+
+	"github.com/go-openapi/swag/jsonutils"
+	"github.com/go-openapi/swag/loading"
+	"github.com/go-openapi/swag/stringutils"
+)
+
+// PathLoader is a function to use when loading remote refs.
+//
+// This is a package level default. It may be overridden or bypassed by
+// specifying the loader in ExpandOptions.
+//
+// NOTE: if you are using the go-openapi/loads package, it will override
+// this value with its own default (a loader to retrieve YAML documents as
+// well as JSON ones).
+var PathLoader = func(pth string) (json.RawMessage, error) {
+	data, err := loading.LoadFromFileOrHTTP(pth)
+	if err != nil {
+		return nil, err
+	}
+	return json.RawMessage(data), nil
+}
+
+// resolverContext allows sharing a context during spec processing.
+// At the moment, it just holds the index of circular references found.
+type resolverContext struct {
+	// circulars holds all visited circular references, to short-circuit $ref resolution.
+	//
+	// This structure is privately instantiated and need not be locked against
+	// concurrent access, unless we choose to implement a parallel spec walking.
+	circulars map[string]bool
+	basePath  string
+	loadDoc   func(string) (json.RawMessage, error)
+	rootID    string
+}
+
+func newResolverContext(options *ExpandOptions) *resolverContext {
+	expandOptions := optionsOrDefault(options)
+
+	// path loader may be overridden by options
+	var loader func(string) (json.RawMessage, error)
+	if expandOptions.PathLoader == nil {
+		loader = PathLoader
+	} else {
+		loader = expandOptions.PathLoader
+	}
+
+	return &resolverContext{
+		circulars: make(map[string]bool),
+		basePath:  expandOptions.RelativeBase, // keep the root base path in context
+		loadDoc:   loader,
+	}
+}
+
+type schemaLoader struct {
+	root    any
+	options *ExpandOptions
+	cache   ResolutionCache
+	context *resolverContext
+}
+
+// Resolve resolves a reference against basePath and stores the result in target.
+//
+// Resolve is not in charge of following references: it only resolves a ref by following its URL.
+//
+// If the schema the ref is referring to holds nested refs, Resolve doesn't resolve them.
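+//
+// For example (sketch): given a ref "#/definitions/Pet", Resolve fetches the
+// document at the ref's URL (or uses the root), walks the JSON pointer to
+// "definitions/Pet", and decodes that node into target.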
+//
+// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct
+func (r *schemaLoader) Resolve(ref *Ref, target any, basePath string) error {
+	return r.resolveRef(ref, target, basePath)
+}
+
+func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) *schemaLoader {
+	if ref.IsRoot() || ref.HasFragmentOnly {
+		return r
+	}
+
+	baseRef := MustCreateRef(basePath)
+	currentRef := normalizeRef(&ref, basePath)
+	if strings.HasPrefix(currentRef.String(), baseRef.String()) {
+		return r
+	}
+
+	// set a new root against which to resolve
+	rootURL := currentRef.GetURL()
+	rootURL.Fragment = ""
+	root, _ := r.cache.Get(rootURL.String())
+
+	// shallow copy of resolver options to set a new RelativeBase when
+	// traversing multiple documents
+	newOptions := r.options
+	newOptions.RelativeBase = rootURL.String()
+
+	return defaultSchemaLoader(root, newOptions, r.cache, r.context)
+}
+
+func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string {
+	if transitive != r {
+		if transitive.options != nil && transitive.options.RelativeBase != "" {
+			return normalizeBase(transitive.options.RelativeBase)
+		}
+	}
+
+	return basePath
+}
+
+func (r *schemaLoader) resolveRef(ref *Ref, target any, basePath string) error {
+	tgt := reflect.ValueOf(target)
+	if tgt.Kind() != reflect.Ptr {
+		return ErrResolveRefNeedsAPointer
+	}
+
+	if ref.GetURL() == nil {
+		return nil
+	}
+
+	var (
+		res  any
+		data any
+		err  error
+	)
+
+	// Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only,
+	// which means it is pointing somewhere in the root.
+	root := r.root
+	if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" {
+		if baseRef, erb := NewRef(basePath); erb == nil {
+			root, _, _, _ = r.load(baseRef.GetURL())
+		}
+	}
+
+	if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil {
+		data = root
+	} else {
+		baseRef := normalizeRef(ref, basePath)
+		data, _, _, err = r.load(baseRef.GetURL())
+		if err != nil {
+			return err
+		}
+	}
+
+	res = data
+	if ref.String() != "" {
+		res, _, err = ref.GetPointer().Get(data)
+		if err != nil {
+			return err
+		}
+	}
+	return jsonutils.FromDynamicJSON(res, target)
+}
+
+func (r *schemaLoader) load(refURL *url.URL) (any, url.URL, bool, error) {
+	debugLog("loading schema from url: %s", refURL)
+	toFetch := *refURL
+	toFetch.Fragment = ""
+
+	var err error
+	pth := toFetch.String()
+	normalized := normalizeBase(pth)
+	debugLog("loading doc from: %s", normalized)
+
+	data, fromCache := r.cache.Get(normalized)
+	if fromCache {
+		return data, toFetch, fromCache, nil
+	}
+
+	b, err := r.context.loadDoc(normalized)
+	if err != nil {
+		return nil, url.URL{}, false, err
+	}
+
+	var doc any
+	if err := json.Unmarshal(b, &doc); err != nil {
+		return nil, url.URL{}, false, err
+	}
+	r.cache.Set(normalized, doc)
+
+	return doc, toFetch, fromCache, nil
+}
+
+// isCircular detects cycles in sequences of $ref.
+//
+// It relies on a private context (which need not be locked).
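+//
+// For instance (sketch): with a chain A -> B -> A, the second visit of A is
+// reported as circular and resolution short-circuits.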
+func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { + normalizedRef := normalizeURI(ref.String(), basePath) + if _, ok := r.context.circulars[normalizedRef]; ok { + // circular $ref has been already detected in another explored cycle + foundCycle = true + return + } + foundCycle = stringutils.ContainsStrings(parentRefs, normalizedRef) // normalized windows url's are lower cased + if foundCycle { + r.context.circulars[normalizedRef] = true + } + return +} + +func (r *schemaLoader) deref(input any, parentRefs []string, basePath string) error { + var ref *Ref + switch refable := input.(type) { + case *Schema: + ref = &refable.Ref + case *Parameter: + ref = &refable.Ref + case *Response: + ref = &refable.Ref + case *PathItem: + ref = &refable.Ref + default: + return fmt.Errorf("unsupported type: %T: %w", input, ErrDerefUnsupportedType) + } + + curRef := ref.String() + if curRef == "" { + return nil + } + + normalizedRef := normalizeRef(ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if r.isCircular(normalizedRef, basePath, parentRefs...) { + return nil + } + + if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { + return err + } + + if ref.String() == "" || ref.String() == curRef { + // done with rereferencing + return nil + } + + parentRefs = append(parentRefs, normalizedRef.String()) + return r.deref(input, parentRefs, normalizedBasePath) +} + +func (r *schemaLoader) shouldStopOnError(err error) bool { + if err != nil && !r.options.ContinueOnError { + return true + } + + if err != nil { + log.Println(err) + } + + return false +} + +func (r *schemaLoader) setSchemaID(target any, id, basePath string) (string, string) { + debugLog("schema has ID: %s", id) + + // handling the case when id is a folder + // remember that basePath has to point to a file + var refPath string + if strings.HasSuffix(id, "/") { + // ensure this is detected as a file, not a folder + refPath = fmt.Sprintf("%s%s", id, "placeholder.json") + } else { + refPath = id + } + + // updates the current base path + // * important: ID can be a relative path + // * registers target to be fetchable from the new base proposed by this id + newBasePath := normalizeURI(refPath, basePath) + + // store found IDs for possible future reuse in $ref + r.cache.Set(newBasePath, target) + + // the root document has an ID: all $ref relative to that ID may + // be rebased relative to the root document + if basePath == r.context.basePath { + debugLog("root document is a schema with ID: %s (normalized as:%s)", id, newBasePath) + r.context.rootID = newBasePath + } + + return newBasePath, refPath +} + +func defaultSchemaLoader( + root any, + expandOptions *ExpandOptions, + cache ResolutionCache, + context *resolverContext) *schemaLoader { + + if expandOptions == nil { + expandOptions = &ExpandOptions{} + } + + cache = cacheOrDefault(cache) + + if expandOptions.RelativeBase == "" { + // if no relative base is provided, assume the root document + // contains all $ref, or at least, that the relative documents + // may be resolved from the current working directory. 
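+		// (baseForRoot is expected to register the root document in the cache
+		// under a synthetic base path, so fragment-only $ref keep resolving)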
+ expandOptions.RelativeBase = baseForRoot(root, cache) + } + debugLog("effective expander options: %#v", expandOptions) + + if context == nil { + context = newResolverContext(expandOptions) + } + + return &schemaLoader{ + root: root, + options: expandOptions, + cache: cache, + context: context, + } +} diff --git a/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json new file mode 100644 index 00000000000..bcbb84743e3 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json @@ -0,0 +1,149 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "$schema": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" 
}, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/go-openapi/spec/schemas/v2/schema.json b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json new file mode 100644 index 00000000000..ebe10ed32d6 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json @@ -0,0 +1,1607 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." 
+ }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." + }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for responses" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." 
+ }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": 
"#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go new file mode 100644 index 00000000000..46a4a7e2f9f --- /dev/null +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -0,0 +1,159 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// 
SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +const ( + basic = "basic" + apiKey = "apiKey" + oauth2 = "oauth2" + implicit = "implicit" + password = "password" + application = "application" + accessCode = "accessCode" +) + +// BasicAuth creates a basic auth security scheme +func BasicAuth() *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} +} + +// APIKeyAuth creates an api key auth security scheme +func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} +} + +// OAuth2Implicit creates an implicit flow oauth2 security scheme +func OAuth2Implicit(authorizationURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: implicit, + AuthorizationURL: authorizationURL, + }} +} + +// OAuth2Password creates a password flow oauth2 security scheme +func OAuth2Password(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: password, + TokenURL: tokenURL, + }} +} + +// OAuth2Application creates an application flow oauth2 security scheme +func OAuth2Application(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: application, + TokenURL: tokenURL, + }} +} + +// OAuth2AccessToken creates an access token flow oauth2 security scheme +func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: accessCode, + AuthorizationURL: authorizationURL, + TokenURL: tokenURL, + }} +} + +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section +type SecuritySchemeProps struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 +} + +// AddScope adds a scope to this security scheme +func (s *SecuritySchemeProps) AddScope(scope, description string) { + if s.Scopes == nil { + s.Scopes = make(map[string]string) + } + s.Scopes[scope] = description +} + +// SecurityScheme allows the definition of a security scheme that can be used by the operations. +// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) +// and OAuth2's common flows (implicit, password, application and access code). 
+// +// For more information: http://goo.gl/8us55a#securitySchemeObject +type SecurityScheme struct { + VendorExtensible + SecuritySchemeProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SecurityScheme) JSONLookup(token string) (any, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) + return r, err +} + +// MarshalJSON marshals this to JSON +func (s SecurityScheme) MarshalJSON() ([]byte, error) { + var ( + b1 []byte + err error + ) + + if s.Type == oauth2 && (s.Flow == "implicit" || s.Flow == "accessCode") { + // for the oauth2 implicit and accessCode flows, an empty AuthorizationURL is marshaled as an empty string + b1, err = json.Marshal(s.SecuritySchemeProps) + } else { + // for any other scheme, an empty AuthorizationURL is omitted + b1, err = json.Marshal(struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 + }{ + Description: s.Description, + Type: s.Type, + Name: s.Name, + In: s.In, + Flow: s.Flow, + AuthorizationURL: s.AuthorizationURL, + TokenURL: s.TokenURL, + Scopes: s.Scopes, + }) + } + if err != nil { + return nil, err + } + + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals this from JSON +func (s *SecurityScheme) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { + return err + } + return json.Unmarshal(data, &s.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go new file mode 100644 index 00000000000..05c3fc775cf --- /dev/null +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" +) + +//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json +//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema +//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/...
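For orientation, here is a minimal sketch of how the security-scheme constructors above might be exercised; it is illustrative only, not part of the vendored patch, and the endpoint URLs and scope name are placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Build an oauth2 accessCode scheme and attach a scope to it.
	scheme := spec.OAuth2AccessToken("https://auth.example.com/authorize", "https://auth.example.com/token")
	scheme.AddScope("read:items", "read access to items")

	// For the accessCode flow, MarshalJSON keeps authorizationUrl even when empty;
	// for non-oauth2 schemes it would be omitted.
	b, err := json.Marshal(scheme)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```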
+//go:generate perl -pi -e s,Json,JSON,g bindata.go + +const ( + // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs + SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" + // JSONSchemaURL the url for the json schema + JSONSchemaURL = "http://json-schema.org/draft-04/schema#" +) + +// MustLoadJSONSchemaDraft04 panics when JSONSchemaDraft04 returns an error +func MustLoadJSONSchemaDraft04() *Schema { + d, e := JSONSchemaDraft04() + if e != nil { + panic(e) + } + return d +} + +// JSONSchemaDraft04 loads the json schema document for json schema draft04 +func JSONSchemaDraft04() (*Schema, error) { + b, err := jsonschemaDraft04JSONBytes() + if err != nil { + return nil, err + } + + schema := new(Schema) + if err := json.Unmarshal(b, schema); err != nil { + return nil, err + } + return schema, nil +} + +// MustLoadSwagger20Schema panics when Swagger20Schema returns an error +func MustLoadSwagger20Schema() *Schema { + d, e := Swagger20Schema() + if e != nil { + panic(e) + } + return d +} + +// Swagger20Schema loads the swagger 2.0 schema from the embedded assets +func Swagger20Schema() (*Schema, error) { + + b, err := v2SchemaJSONBytes() + if err != nil { + return nil, err + } + + schema := new(Schema) + if err := json.Unmarshal(b, schema); err != nil { + return nil, err + } + return schema, nil +} diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go new file mode 100644 index 00000000000..f7cd0f608c2 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -0,0 +1,433 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "fmt" + "slices" + "strconv" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +// Swagger is the root document object for the API specification. +// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) +// together into one document.
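As a quick aside, loading the embedded swagger 2.0 meta-schema through the helpers above might look like the sketch below. It is illustrative only, and the value shown in the comment assumes the embedded asset keeps its published id:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Panics if the embedded asset cannot be decoded; convenient at start-up.
	schema := spec.MustLoadSwagger20Schema()
	fmt.Println(schema.ID) // expected: http://swagger.io/v2/schema.json#

	// The non-panicking variant surfaces the decoding error instead.
	if _, err := spec.JSONSchemaDraft04(); err != nil {
		fmt.Println("cannot load draft-04 schema:", err)
	}
}
```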
+// +// For more information: http://goo.gl/8us55a#swagger-object- +type Swagger struct { + VendorExtensible + SwaggerProps +} + +// JSONLookup looks up a value by the json property name +func (s Swagger) JSONLookup(token string) (any, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) + return r, err +} + +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SwaggerProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals a swagger spec from json +func (s *Swagger) UnmarshalJSON(data []byte) error { + var sw Swagger + if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { + return err + } + *s = sw + return nil +} + +// GobEncode provides a safe gob encoder for Swagger, including extensions +func (s Swagger) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw := struct { + Props SwaggerProps + Ext VendorExtensible + }{ + Props: s.SwaggerProps, + Ext: s.VendorExtensible, + } + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Swagger, including extensions +func (s *Swagger) GobDecode(b []byte) error { + var raw struct { + Props SwaggerProps + Ext VendorExtensible + } + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + s.SwaggerProps = raw.Props + s.VendorExtensible = raw.Ext + return nil +} + +// SwaggerProps captures the top-level properties of an API specification +// +// NOTE: validation rules +// - the scheme, when present, must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required +type SwaggerProps struct { + ID string `json:"id,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Swagger string `json:"swagger,omitempty"` + Info *Info `json:"info,omitempty"` + Host string `json:"host,omitempty"` + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` + Definitions Definitions `json:"definitions,omitempty"` + Parameters map[string]Parameter `json:"parameters,omitempty"` + Responses map[string]Response `json:"responses,omitempty"` + SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Tags []Tag `json:"tags,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +type swaggerPropsAlias SwaggerProps + +type gobSwaggerPropsAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *swaggerPropsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements +func (o SwaggerProps) GobEncode() ([]byte, error) { + raw := gobSwaggerPropsAlias{ + Alias: (*swaggerPropsAlias)(&o), + } + + var b bytes.Buffer + if o.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(o.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) +
return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(o.Security)) + for _, req := range o.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements +func (o *SwaggerProps) GobDecode(b []byte) error { + var raw gobSwaggerPropsAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *o = *(*SwaggerProps)(raw.Alias) + return nil +} + +// Dependencies represents a dependencies property +type Dependencies map[string]SchemaOrStringArray + +// SchemaOrBool represents a schema or boolean value, and is biased towards true for the boolean property +type SchemaOrBool struct { + Allows bool + Schema *Schema +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrBool) JSONLookup(token string) (any, error) { + if token == "allows" { + return s.Allows, nil + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +// MarshalJSON converts this object to JSON +func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +// UnmarshalJSON converts this bool or schema object from a JSON structure +func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { + var nw SchemaOrBool + if len(data) > 0 { + if data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + nw.Allows = !bytes.Equal(data, []byte("false")) + } + *s = nw + return nil +} + +// SchemaOrStringArray represents a schema or a string array +type SchemaOrStringArray struct { + Schema *Schema + Property []string +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrStringArray) JSONLookup(token string) (any, error) { + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + var nw SchemaOrStringArray + if first == '{' { + var sch Schema + if
err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +// Definitions contains the models explicitly defined in this spec +// An object to hold data types that can be consumed and produced by operations. +// These data types can be primitives, arrays or models. +// +// For more information: http://goo.gl/8us55a#definitionsObject +type Definitions map[string]Schema + +// SecurityDefinitions a declaration of the security schemes available to be used in the specification. +// This does not enforce the security schemes on the operations and only serves to provide +// the relevant details for each scheme. +// +// For more information: http://goo.gl/8us55a#securityDefinitionsObject +type SecurityDefinitions map[string]*SecurityScheme + +// StringOrArray represents a value that can either be a string +// or an array of strings. Mainly here for serialization purposes +type StringOrArray []string + +// Contains returns true when the value is contained in the slice +func (s StringOrArray) Contains(value string) bool { + return slices.Contains(s, value) +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrArray) JSONLookup(token string) (any, error) { + if _, err := strconv.Atoi(token); err == nil { + r, _, err := jsonpointer.GetForToken(s.Schemas, token) + return r, err + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + + if first == '[' { + var parsed []string + if err := json.Unmarshal(data, &parsed); err != nil { + return err + } + *s = StringOrArray(parsed) + return nil + } + + var single any + if err := json.Unmarshal(data, &single); err != nil { + return err + } + if single == nil { + return nil + } + switch v := single.(type) { + case string: + *s = StringOrArray([]string{v}) + return nil + default: + return fmt.Errorf("only string or array is allowed, not %T: %w", single, ErrSpec) + } +} + +// MarshalJSON converts this string or array to a JSON array or JSON string +func (s StringOrArray) MarshalJSON() ([]byte, error) { + if len(s) == 1 { + return json.Marshal([]string(s)[0]) + } + return json.Marshal([]string(s)) +} + +// SchemaOrArray represents a value that can either be a Schema +// or an array of Schema. 
Mainly here for serialization purposes +type SchemaOrArray struct { + Schema *Schema + Schemas []Schema +} + +// Len returns the number of schemas in this property +func (s SchemaOrArray) Len() int { + if s.Schema != nil { + return 1 + } + return len(s.Schemas) +} + +// ContainsType returns true when one of the schemas is of the specified type +func (s *SchemaOrArray) ContainsType(name string) bool { + if s.Schema != nil { + return s.Schema.Type != nil && s.Schema.Type.Contains(name) + } + return false +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalJSON() ([]byte, error) { + if len(s.Schemas) > 0 { + return json.Marshal(s.Schemas) + } + return json.Marshal(s.Schema) +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + var nw SchemaOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Schemas); err != nil { + return err + } + } + *s = nw + return nil +} + +// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go new file mode 100644 index 00000000000..ae98fd985fb --- /dev/null +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -0,0 +1,64 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag/jsonutils" +) + +// TagProps describes a tag entry in the top level tags section of a swagger spec +type TagProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// Tag allows adding metadata to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). +// It is not mandatory to have a Tag Object per tag used there.
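The polymorphic helpers above (SchemaOrBool, SchemaOrStringArray, StringOrArray, SchemaOrArray) all decode the same way: inspect the first byte of the raw JSON, then pick a strategy. A small illustrative sketch, not part of the patch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	var single, many spec.StringOrArray

	// A bare JSON string decodes into a one-element slice...
	_ = json.Unmarshal([]byte(`"application/json"`), &single)
	// ...while a JSON array is decoded element by element.
	_ = json.Unmarshal([]byte(`["application/json","application/xml"]`), &many)
	fmt.Println(single.Contains("application/json"), len(many)) // true 2

	// SchemaOrBool maps a bare false to Allows == false.
	var ap spec.SchemaOrBool
	_ = json.Unmarshal([]byte(`false`), &ap)
	fmt.Println(ap.Allows) // false
}
```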
+// +// For more information: http://goo.gl/8us55a#tagObject +type Tag struct { + VendorExtensible + TagProps +} + +// NewTag creates a new tag +func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { + return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} +} + +// JSONLookup implements an interface to customize json pointer lookup +func (t Tag) JSONLookup(token string) (any, error) { + if ex, ok := t.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(t.TagProps, token) + return r, err +} + +// MarshalJSON marshals this to JSON +func (t Tag) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(t.TagProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(t.VendorExtensible) + if err != nil { + return nil, err + } + return jsonutils.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals this from JSON +func (t *Tag) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &t.TagProps); err != nil { + return err + } + return json.Unmarshal(data, &t.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go new file mode 100644 index 00000000000..8d0c81acd68 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/url_go19.go @@ -0,0 +1,14 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +import "net/url" + +func parseURL(s string) (*url.URL, error) { + u, err := url.Parse(s) + if err == nil { + u.OmitHost = false + } + return u, err +} diff --git a/vendor/github.com/go-openapi/spec/validations.go b/vendor/github.com/go-openapi/spec/validations.go new file mode 100644 index 00000000000..4f70e301732 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/validations.go @@ -0,0 +1,222 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +// CommonValidations describes common JSON-schema validations +type CommonValidations struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []any `json:"enum,omitempty"` +} + +// SetValidations defines all validations for a simple schema. +// +// NOTE: the input is the larger set of validations available for schemas. +// For simple schemas, MinProperties and MaxProperties are ignored.
+func (v *CommonValidations) SetValidations(val SchemaValidations) { + v.Maximum = val.Maximum + v.ExclusiveMaximum = val.ExclusiveMaximum + v.Minimum = val.Minimum + v.ExclusiveMinimum = val.ExclusiveMinimum + v.MaxLength = val.MaxLength + v.MinLength = val.MinLength + v.Pattern = val.Pattern + v.MaxItems = val.MaxItems + v.MinItems = val.MinItems + v.UniqueItems = val.UniqueItems + v.MultipleOf = val.MultipleOf + v.Enum = val.Enum +} + +type clearedValidation struct { + Validation string + Value any +} + +type clearedValidations []clearedValidation + +func (c clearedValidations) apply(cbs []func(string, any)) { + for _, cb := range cbs { + for _, cleared := range c { + cb(cleared.Validation, cleared.Value) + } + } +} + +// ClearNumberValidations clears all number validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearNumberValidations(cbs ...func(string, any)) { + const maxNumberValidations = 5 + done := make(clearedValidations, 0, maxNumberValidations) + defer func() { + done.apply(cbs) + }() + + if v.Minimum != nil { + done = append(done, clearedValidation{Validation: "minimum", Value: v.Minimum}) + v.Minimum = nil + } + if v.Maximum != nil { + done = append(done, clearedValidation{Validation: "maximum", Value: v.Maximum}) + v.Maximum = nil + } + if v.ExclusiveMaximum { + done = append(done, clearedValidation{Validation: "exclusiveMaximum", Value: v.ExclusiveMaximum}) + v.ExclusiveMaximum = false + } + if v.ExclusiveMinimum { + done = append(done, clearedValidation{Validation: "exclusiveMinimum", Value: v.ExclusiveMinimum}) + v.ExclusiveMinimum = false + } + if v.MultipleOf != nil { + done = append(done, clearedValidation{Validation: "multipleOf", Value: v.MultipleOf}) + v.MultipleOf = nil + } +} + +// ClearStringValidations clears all string validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearStringValidations(cbs ...func(string, any)) { + const maxStringValidations = 3 + done := make(clearedValidations, 0, maxStringValidations) + defer func() { + done.apply(cbs) + }() + + if v.Pattern != "" { + done = append(done, clearedValidation{Validation: "pattern", Value: v.Pattern}) + v.Pattern = "" + } + if v.MinLength != nil { + done = append(done, clearedValidation{Validation: "minLength", Value: v.MinLength}) + v.MinLength = nil + } + if v.MaxLength != nil { + done = append(done, clearedValidation{Validation: "maxLength", Value: v.MaxLength}) + v.MaxLength = nil + } +} + +// ClearArrayValidations clears all array validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearArrayValidations(cbs ...func(string, any)) { + const maxArrayValidations = 3 + done := make(clearedValidations, 0, maxArrayValidations) + defer func() { + done.apply(cbs) + }() + + if v.MaxItems != nil { + done = append(done, clearedValidation{Validation: "maxItems", Value: v.MaxItems}) + v.MaxItems = nil + } + if v.MinItems != nil { + done = append(done, clearedValidation{Validation: "minItems", Value: v.MinItems}) + v.MinItems = nil + } + if v.UniqueItems { + done = append(done, clearedValidation{Validation: "uniqueItems", Value: v.UniqueItems}) + v.UniqueItems = false + } +} + +// Validations returns a clone of the validations for a simple schema. +// +// NOTE: in the context of simple schema objects, MinProperties, MaxProperties +// and PatternProperties remain unset. 
+func (v CommonValidations) Validations() SchemaValidations { + return SchemaValidations{ + CommonValidations: v, + } +} + +// HasNumberValidations indicates if the validations are for numbers or integers +func (v CommonValidations) HasNumberValidations() bool { + return v.Maximum != nil || v.Minimum != nil || v.MultipleOf != nil +} + +// HasStringValidations indicates if the validations are for strings +func (v CommonValidations) HasStringValidations() bool { + return v.MaxLength != nil || v.MinLength != nil || v.Pattern != "" +} + +// HasArrayValidations indicates if the validations are for arrays +func (v CommonValidations) HasArrayValidations() bool { + return v.MaxItems != nil || v.MinItems != nil || v.UniqueItems +} + +// HasEnum indicates if the validation includes some enum constraint +func (v CommonValidations) HasEnum() bool { + return len(v.Enum) > 0 +} + +// SchemaValidations describes the validation properties of a schema +// +// NOTE: at this moment, this is not embedded in SchemaProps because this would induce a breaking change +// in the exported members: all initializers using literals would fail. +type SchemaValidations struct { + CommonValidations + + PatternProperties SchemaProperties `json:"patternProperties,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` +} + +// HasObjectValidations indicates if the validations are for objects +func (v SchemaValidations) HasObjectValidations() bool { + return v.MaxProperties != nil || v.MinProperties != nil || v.PatternProperties != nil +} + +// SetValidations for schema validations +func (v *SchemaValidations) SetValidations(val SchemaValidations) { + v.CommonValidations.SetValidations(val) + v.PatternProperties = val.PatternProperties + v.MaxProperties = val.MaxProperties + v.MinProperties = val.MinProperties +} + +// Validations for a schema +func (v SchemaValidations) Validations() SchemaValidations { + val := v.CommonValidations.Validations() + val.PatternProperties = v.PatternProperties + val.MinProperties = v.MinProperties + val.MaxProperties = v.MaxProperties + return val +} + +// ClearObjectValidations clears all object validations. + +// +// Some callbacks may be set by the caller to capture changed values. +func (v *SchemaValidations) ClearObjectValidations(cbs ...func(string, any)) { + const maxObjectValidations = 3 + done := make(clearedValidations, 0, maxObjectValidations) + defer func() { + done.apply(cbs) + }() + + if v.MaxProperties != nil { + done = append(done, clearedValidation{Validation: "maxProperties", Value: v.MaxProperties}) + v.MaxProperties = nil + } + if v.MinProperties != nil { + done = append(done, clearedValidation{Validation: "minProperties", Value: v.MinProperties}) + v.MinProperties = nil + } + if v.PatternProperties != nil { + done = append(done, clearedValidation{Validation: "patternProperties", Value: v.PatternProperties}) + v.PatternProperties = nil + } +} diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go new file mode 100644 index 00000000000..bf2f8f18b24 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/xml_object.go @@ -0,0 +1,57 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package spec + +// XMLObject a metadata object that allows for more fine-tuned XML model definitions.
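To make the callback mechanism of the Clear*Validations helpers above concrete, a short illustrative sketch (not part of the patch; the sample pattern and length are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	maxLen := int64(64)
	v := spec.CommonValidations{
		Pattern:   `^[a-z]+$`,
		MaxLength: &maxLen,
	}

	// Each cleared validation is reported to the callback by name,
	// together with the value it held before being reset.
	v.ClearStringValidations(func(name string, previous any) {
		fmt.Println("cleared:", name)
	})

	fmt.Println(v.HasStringValidations()) // false
}
```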
+// +// For more information: http://goo.gl/8us55a#xmlObject +type XMLObject struct { + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + Prefix string `json:"prefix,omitempty"` + Attribute bool `json:"attribute,omitempty"` + Wrapped bool `json:"wrapped,omitempty"` +} + +// WithName sets the xml name for the object +func (x *XMLObject) WithName(name string) *XMLObject { + x.Name = name + return x +} + +// WithNamespace sets the xml namespace for the object +func (x *XMLObject) WithNamespace(namespace string) *XMLObject { + x.Namespace = namespace + return x +} + +// WithPrefix sets the xml prefix for the object +func (x *XMLObject) WithPrefix(prefix string) *XMLObject { + x.Prefix = prefix + return x +} + +// AsAttribute flags this object as xml attribute +func (x *XMLObject) AsAttribute() *XMLObject { + x.Attribute = true + return x +} + +// AsElement flags this object as an xml node +func (x *XMLObject) AsElement() *XMLObject { + x.Attribute = false + return x +} + +// AsWrapped flags this object as wrapped; this is mostly useful for array types +func (x *XMLObject) AsWrapped() *XMLObject { + x.Wrapped = true + return x +} + +// AsUnwrapped flags this object as unwrapped +func (x *XMLObject) AsUnwrapped() *XMLObject { + x.Wrapped = false + return x +} diff --git a/vendor/github.com/go-openapi/strfmt/.editorconfig b/vendor/github.com/go-openapi/strfmt/.editorconfig new file mode 100644 index 00000000000..3152da69a5d --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/strfmt/.gitattributes b/vendor/github.com/go-openapi/strfmt/.gitattributes new file mode 100644 index 00000000000..d020be8ea4e --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.gitattributes @@ -0,0 +1,2 @@ +*.go text eol=lf + diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore new file mode 100644 index 00000000000..dd91ed6a04e --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml new file mode 100644 index 00000000000..1ad5adf47e6 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -0,0 +1,75 @@ +version: "2" +linters: + default: all + disable: + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - godot + - godox + - gosmopolitan + - inamedparam + - intrange + - ireturn + - lll + - musttag + - nestif + - nlreturn + - nonamedreturns + - noinlineerr + - paralleltest + - recvcheck + - testpackage + - thelper + - tparallel + - unparam + - varnamelen + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45
+ exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. 
+Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/LICENSE b/vendor/github.com/go-openapi/strfmt/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md
new file mode 100644
index 00000000000..de5afe13760
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/README.md
@@ -0,0 +1,92 @@
+# Strfmt [![Build Status](https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt)
+[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE)
+[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt)
+
+This package exposes a registry of data types to support string formats in the go-openapi toolkit.
+
+strfmt represents a well-known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those.
+
+## Supported data formats
+go-openapi/strfmt follows the swagger 2.0 specification with the following formats
+defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types).
+
+It also provides convenient extensions to go-openapi users.
+
+- [x] JSON-schema draft 4 formats
+  - date-time
+  - email
+  - hostname
+  - ipv4
+  - ipv6
+  - uri
+- [x] swagger 2.0 format extensions
+  - binary
+  - byte (e.g. base64 encoded string)
+  - date (e.g. "1970-01-01")
+  - password
+- [x] go-openapi custom format extensions
+  - bsonobjectid (BSON objectID)
+  - creditcard
+  - duration (e.g. "3 weeks", "1ms")
+  - hexcolor (e.g. "#FFFFFF")
+  - isbn, isbn10, isbn13
+  - mac (e.g. "01:02:03:04:05:06")
+  - rgbcolor (e.g. "rgb(100,100,100)")
+  - ssn
+  - uuid, uuid3, uuid4, uuid5, uuid7
+  - cidr (e.g. "192.0.2.1/24", "2001:db8:a0b:12f0::1/32")
+  - ulid (e.g. "00000PP9HGSBSSDZ1JTEXBJ0PW", [spec](https://github.com/ulid/spec))
+
+> NOTE: as the name suggests, this package is intended to support string formatting only.
+> It does not provide validation for numerical values with swagger format extension for JSON types "number" or
+> "integer" (e.g. float, double, int32...).
+
+## Type conversion
+
+All types defined here are stringers and may be converted to strings with `.String()`.
+Note that most types defined by this package may be converted directly to string like `string(Email{})`.
+
+`Date` and `DateTime` may be converted directly to `time.Time` like `time.Time(DateTime{})`.
+Similarly, you can convert `Duration` to `time.Duration` as in `time.Duration(Duration{})`.
+
+## Using pointers
+
+The `conv` subpackage provides helpers to convert the types to and from pointers, just like `go-openapi/swag` does
+with primitive types.
+
+## Format types
+Types defined in strfmt expose marshaling and validation capabilities.
+
+List of defined types:
+- Base64
+- CreditCard
+- Date
+- DateTime
+- Duration
+- Email
+- HexColor
+- Hostname
+- IPv4
+- IPv6
+- CIDR
+- ISBN
+- ISBN10
+- ISBN13
+- MAC
+- ObjectId
+- Password
+- RGBColor
+- SSN
+- URI
+- UUID
+- [UUID3](https://www.rfc-editor.org/rfc/rfc9562.html#name-uuid-version-3)
+- [UUID4](https://www.rfc-editor.org/rfc/rfc9562.html#name-uuid-version-4)
+- [UUID5](https://www.rfc-editor.org/rfc/rfc9562.html#name-uuid-version-5)
+- [UUID7](https://www.rfc-editor.org/rfc/rfc9562.html#name-uuid-version-7)
+- [ULID](https://github.com/ulid/spec)
+
+## Licensing
+
+This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE).
diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go
new file mode 100644
index 00000000000..0eec8f6432c
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/bson.go
@@ -0,0 +1,115 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"fmt"
+
+	bsonprim "go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+func init() {
+	var id ObjectId
+	// register this format in the default registry
+	Default.Add("bsonobjectid", &id, IsBSONObjectID)
+}
+
+// IsBSONObjectID returns true when the string is a valid BSON.ObjectId
+func IsBSONObjectID(str string) bool {
+	_, err := bsonprim.ObjectIDFromHex(str)
+	return err == nil
+}
+
+// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID)
+//
+// swagger:strfmt bsonobjectid
+type ObjectId bsonprim.ObjectID //nolint:revive
+
+// NewObjectId creates an ObjectId from a hex string.
+// It panics when the string is not a valid object ID.
+func NewObjectId(hex string) ObjectId { //nolint:revive
+	oid, err := bsonprim.ObjectIDFromHex(hex)
+	if err != nil {
+		panic(err)
+	}
+	return ObjectId(oid)
+}
+
+// MarshalText turns this instance into text
+func (id ObjectId) MarshalText() ([]byte, error) {
+	oid := bsonprim.ObjectID(id)
+	if oid == bsonprim.NilObjectID {
+		return nil, nil
+	}
+	return []byte(oid.Hex()), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on
+	if len(data) == 0 {
+		*id = ObjectId(bsonprim.NilObjectID)
+		return nil
+	}
+	oidstr := string(data)
+	oid, err := bsonprim.ObjectIDFromHex(oidstr)
+	if err != nil {
+		return err
+	}
+	*id = ObjectId(oid)
+	return nil
+}
+
+// Scan read a value from a database driver
+func (id *ObjectId) Scan(raw any) error {
+	var data []byte
+	switch v := raw.(type) {
+	case []byte:
+		data = v
+	case string:
+		data = []byte(v)
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.ObjectId from: %#v: %w", v, ErrFormat)
+	}
+
+	return id.UnmarshalText(data)
+}
+
+// Value converts a value to a database driver value
+func (id ObjectId) Value() (driver.Value, error) {
+	return driver.Value(bsonprim.ObjectID(id).Hex()), nil
+}
+
+func (id ObjectId) String() string {
+	return bsonprim.ObjectID(id).Hex()
+}
+
+// MarshalJSON returns the ObjectId as JSON
+func (id ObjectId) MarshalJSON() ([]byte, error) {
+	return bsonprim.ObjectID(id).MarshalJSON()
+}
+
+// UnmarshalJSON sets the ObjectId from JSON
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+	var obj bsonprim.ObjectID
+	if err := obj.UnmarshalJSON(data); err != nil {
+		return err
+	}
+	*id = ObjectId(obj)
+	return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (id *ObjectId) DeepCopyInto(out *ObjectId) {
+	*out = *id
+}
+
+// DeepCopy copies the receiver into a new ObjectId.
+func (id *ObjectId) DeepCopy() *ObjectId {
+	if id == nil {
+		return nil
+	}
+	out := new(ObjectId)
+	id.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go
new file mode 100644
index 00000000000..8aa17b8ea55
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/date.go
@@ -0,0 +1,151 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+func init() {
+	d := Date{}
+	// register this format in the default registry
+	Default.Add("date", &d, IsDate)
+}
+
+// IsDate returns true when the string is a valid date
+func IsDate(str string) bool {
+	_, err := time.Parse(RFC3339FullDate, str)
+	return err == nil
+}
+
+const (
+	// RFC3339FullDate represents a full-date as specified by RFC3339
+	// See: http://goo.gl/xXOvVd
+	RFC3339FullDate = "2006-01-02"
+)
+
+// Date represents a date from the API
+//
+// swagger:strfmt date
+type Date time.Time
+
+// String converts this date into a string
+func (d Date) String() string {
+	return time.Time(d).Format(RFC3339FullDate)
+}
+
+// UnmarshalText parses a text representation into a date type
+func (d *Date) UnmarshalText(text []byte) error {
+	if len(text) == 0 {
+		return nil
+	}
+	dd, err := time.ParseInLocation(RFC3339FullDate, string(text), DefaultTimeLocation)
+	if err != nil {
+		return err
+	}
+	*d = Date(dd)
+	return nil
+}
+
+// MarshalText serializes this date type to string
+func (d Date) MarshalText() ([]byte, error) {
+	return []byte(d.String()), nil
+}
+
+// Scan scans a Date value from database driver type.
+func (d *Date) Scan(raw any) error {
+	switch v := raw.(type) {
+	case []byte:
+		return d.UnmarshalText(v)
+	case string:
+		return d.UnmarshalText([]byte(v))
+	case time.Time:
+		*d = Date(v)
+		return nil
+	case nil:
+		*d = Date{}
+		return nil
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v: %w", v, ErrFormat)
+	}
+}
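
Aside from the diff itself, a short consumer-side sketch may help here. It is not part of the vendored files; it only exercises `IsDate` and the JSON round-trip of `Date` exactly as defined above, assuming the package is importable as `github.com/go-openapi/strfmt` (the sample dates are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	// "date" values use the RFC3339 full-date layout ("2006-01-02").
	fmt.Println(strfmt.IsDate("1970-01-01")) // true
	fmt.Println(strfmt.IsDate("1970-13-01")) // false: no 13th month

	// JSON round-trip: Date marshals to a quoted full-date string.
	var d strfmt.Date
	if err := json.Unmarshal([]byte(`"2024-02-29"`), &d); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(d)
	fmt.Println(string(out)) // "2024-02-29"
}
```

+
+// Value converts Date to a primitive value ready to be written to a database.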
+func (d Date) Value() (driver.Value, error) { + return driver.Value(d.String()), nil +} + +// MarshalJSON returns the Date as JSON +func (d Date) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(d).Format(RFC3339FullDate)) +} + +// UnmarshalJSON sets the Date from JSON +func (d *Date) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var strdate string + if err := json.Unmarshal(data, &strdate); err != nil { + return err + } + tt, err := time.ParseInLocation(RFC3339FullDate, strdate, DefaultTimeLocation) + if err != nil { + return err + } + *d = Date(tt) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (d *Date) DeepCopyInto(out *Date) { + *out = *d +} + +// DeepCopy copies the receiver into a new Date. +func (d *Date) DeepCopy() *Date { + if d == nil { + return nil + } + out := new(Date) + d.DeepCopyInto(out) + return out +} + +// GobEncode implements the gob.GobEncoder interface. +func (d Date) GobEncode() ([]byte, error) { + return d.MarshalBinary() +} + +// GobDecode implements the gob.GobDecoder interface. +func (d *Date) GobDecode(data []byte) error { + return d.UnmarshalBinary(data) +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d Date) MarshalBinary() ([]byte, error) { + return time.Time(d).MarshalBinary() +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Date) UnmarshalBinary(data []byte) error { + var original time.Time + + err := original.UnmarshalBinary(data) + if err != nil { + return err + } + + *d = Date(original) + + return nil +} + +// Equal checks if two Date instances are equal +func (d Date) Equal(d2 Date) bool { + return time.Time(d).Equal(time.Time(d2)) +} diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go new file mode 100644 index 00000000000..8a80cfbdb8a --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/default.go @@ -0,0 +1,2110 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package strfmt + +import ( + "database/sql/driver" + "encoding/base64" + "encoding/json" + "fmt" + "net" + "net/mail" + "net/netip" + "net/url" + "regexp" + "strconv" + "strings" + + "github.com/google/uuid" + "golang.org/x/net/idna" +) + +const ( + // HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114. + // + // Deprecated: this package no longer uses regular expressions to validate hostnames. + HostnamePattern = `^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z0-9-\p{L}]){2,63})$` + + // json null type + jsonNull = "null" +) + +const ( + // UUIDPattern Regex for UUID that allows uppercase + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUIDPattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{32}$)` + + // UUID3Pattern Regex for UUID3 that allows uppercase + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUID3Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{12}3[0-9a-f]{3}?[0-9a-f]{16}$)` + + // UUID4Pattern Regex for UUID4 that allows uppercase + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. 
+ UUID4Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}$)` + + // UUID5Pattern Regex for UUID5 that allows uppercase + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUID5Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}5[0-9a-f]{3}[89ab][0-9a-f]{15}$)` + + isbn10Pattern string = "^(?:[0-9]{9}X|[0-9]{10})$" + isbn13Pattern string = "^(?:[0-9]{13})$" + usCardPattern string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" + ssnPattern string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` + hexColorPattern string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + rgbColorPattern string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" +) + +const ( + isbnVersion10 = 10 + isbnVersion13 = 13 + decimalBase = 10 +) + +var ( + idnaHostChecker = idna.New( + idna.ValidateForRegistration(), // shorthand for [idna.StrictDomainName], [idna.ValidateLabels], [idna.VerifyDNSLength], [idna.BidiRule] + ) + + whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`) + rxISBN10 = regexp.MustCompile(isbn10Pattern) + rxISBN13 = regexp.MustCompile(isbn13Pattern) + rxCreditCard = regexp.MustCompile(usCardPattern) + rxSSN = regexp.MustCompile(ssnPattern) + rxHexcolor = regexp.MustCompile(hexColorPattern) + rxRGBcolor = regexp.MustCompile(rgbColorPattern) +) + +// IsHostname returns true when the string is a valid hostname. +// +// It follows the rules detailed at https://url.spec.whatwg.org/#concept-host-parser +// and implemented by most modern web browsers. +// +// It supports IDNA rules regarding internationalized names with unicode. +// +// Besides: +// * the empty string is not a valid host name +// * a trailing dot is allowed in names and IPv4's (not IPv6) +// * a host name can be a valid IPv4 (with decimal, octal or hexadecimal numbers) or IPv6 address +// * IPv6 zones are disallowed +// * top-level domains can be unicode (cf. https://www.iana.org/domains/root/db). +// +// NOTE: this validator doesn't check top-level domains against the IANA root database. +// It merely ensures that a top-level domain in a FQDN is at least 2 code points long. +func IsHostname(str string) bool { + if len(str) == 0 { + return false + } + + // IP v6 check + if ipv6Cleaned, found := strings.CutPrefix(str, "["); found { + ipv6Cleaned, found = strings.CutSuffix(ipv6Cleaned, "]") + if !found { + return false + } + + return isValidIPv6(ipv6Cleaned) + } + + // IDNA check + res, err := idnaHostChecker.ToASCII(strings.ToLower(str)) + if err != nil || res == "" { + return false + } + + parts := strings.Split(res, ".") + + // IP v4 check + lastPart, lastIndex, shouldBeIPv4 := domainEndsAsNumber(parts) + if shouldBeIPv4 { + // domain ends in a number: must be an IPv4 + return isValidIPv4(parts[:lastIndex+1]) // if the last part is a trailing dot, remove it + } + + // check TLD length (excluding trailing dot) + const minTLDLength = 2 + if lastIndex > 0 && len(lastPart) < minTLDLength { + return false + } + + return true +} + +// domainEndsAsNumber determines if a domain name ends with a decimal, octal or hex digit, +// accounting for a possible trailing dot (the last part being empty in that case). 
+//
+// It returns the last non-trailing dot part and if that part consists only of (dec/hex/oct) digits.
+func domainEndsAsNumber(parts []string) (lastPart string, lastIndex int, ok bool) {
+	// NOTE: using ParseUint(x, 0, 32) is not an option, as the IPv4 format supported by WHATWG
+	// doesn't support notations such as "0b1001" (binary digits) or "0o666" (alternate notation for octal digits).
+	lastIndex = len(parts) - 1
+	lastPart = parts[lastIndex]
+	if len(lastPart) == 0 {
+		// trailing dot
+		if len(parts) == 1 { // dot-only string: normally already ruled out by the IDNA check above
+			return lastPart, lastIndex, false
+		}
+
+		lastIndex--
+		lastPart = parts[lastIndex]
+	}
+
+	if startOfHexDigit(lastPart) {
+		for _, b := range []byte(lastPart[2:]) {
+			if !isHexDigit(b) {
+				return lastPart, lastIndex, false
+			}
+		}
+
+		return lastPart, lastIndex, true
+	}
+
+	// check for decimal and octal
+	for _, b := range []byte(lastPart) {
+		if !isASCIIDigit(b) {
+			return lastPart, lastIndex, false
+		}
+	}
+
+	return lastPart, lastIndex, true
+}
+
+func startOfHexDigit(str string) bool {
+	return strings.HasPrefix(str, "0x") // the input has already been lower-cased
+}
+
+func startOfOctalDigit(str string) bool {
+	if str == "0" {
+		// a single "0" is considered decimal
+		return false
+	}
+
+	return strings.HasPrefix(str, "0")
+}
+
+func isValidIPv6(str string) bool {
+	// disallow empty ipv6 address
+	if len(str) == 0 {
+		return false
+	}
+
+	addr, err := netip.ParseAddr(str)
+	if err != nil {
+		return false
+	}
+
+	if !addr.Is6() {
+		return false
+	}
+
+	// IPv6 zones are explicitly rejected
+	if addr.Zone() != "" {
+		return false
+	}
+
+	return true
+}
+
+// isValidIPv4 parses an IPv4 with decimal, hex or octal digit parts.
+//
+// We can't rely on [netip.ParseAddr] because we may get a mix of decimal, octal and hex digits.
+//
+// Examples of valid addresses not supported by [netip.ParseAddr] or [net.ParseIP]:
+//
+//	"192.0x00A80001"
+//	"0300.0250.0340.001"
+//	"1.0x.1.1"
+//
+// But not:
+//
+//	"0b1010.2.3.4"
+//	"0o07.2.3.4"
+func isValidIPv4(parts []string) bool {
+	// NOTE: using ParseUint(x, 0, 32) is not an option, even though it would simplify this code a lot.
+	// The IPv4 format supported by WHATWG doesn't support notations such as "0b1001" (binary digits)
+	// or "0o666" (alternate notation for octal digits).
+	const (
+		maxPartsInIPv4  = 4
+		maxDigitsInPart = 11 // max size of a 4-byte hex or octal digit
+	)
+
+	if len(parts) == 0 || len(parts) > maxPartsInIPv4 {
+		return false
+	}
+
+	// we call this when we know that the last part is a digit part, so len(lastPart)>0
+
+	digits := make([]uint64, 0, maxPartsInIPv4)
+	for _, part := range parts {
+		if len(part) == 0 { // empty part: this case has normally been already ruled out by the IDNA check above
+			return false
+		}
+
+		if len(part) > maxDigitsInPart { // whether decimal, octal or hex, an address can't exceed that length
+			return false
+		}
+
+		if !isASCIIDigit(part[0]) { // start of an IPv4 part is always a digit
+			return false
+		}
+
+		switch {
+		case startOfHexDigit(part):
+			const hexDigitOffset = 2
+			hexString := part[hexDigitOffset:]
+			if len(hexString) == 0 { // 0x part: assume 0
+				digits = append(digits, 0)
+
+				continue
+			}
+
+			hexDigit, err := strconv.ParseUint(hexString, 16, 32)
+			if err != nil {
+				return false
+			}
+
+			digits = append(digits, hexDigit)
+
+			continue
+
+		case startOfOctalDigit(part):
+			const octDigitOffset = 1
+			octString := part[octDigitOffset:] // we know that this is not empty
+			octDigit, err := strconv.ParseUint(octString, 8, 32)
+			if err != nil {
+				return false
+			}
+
+			digits = append(digits, octDigit)
+
+			continue
+
+		default: // assume decimal digits (0-255)
+			// we know that we don't have a leading 0 (would have been caught by octal digit)
+			decDigit, err := strconv.ParseUint(part, 10, 8)
+			if err != nil {
+				return false
+			}
+
+			digits = append(digits, decDigit)
+		}
+	}
+
+	// now check the digits: the last digit may encompass several parts of the address
+	lastDigit := digits[len(digits)-1]
+	if lastDigit > uint64(1)<<(8*(maxPartsInIPv4-len(digits)+1))-1 {
+		return false
+	}
+
+	if len(digits) > 1 {
+		const maxUint8 = uint64(^uint8(0))
+
+		for i := range len(digits) - 2 {
+			if digits[i] > maxUint8 {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+func isHexDigit(c byte) bool {
+	switch {
+	case '0' <= c && c <= '9':
+		return true
+	case 'a' <= c && c <= 'f': // assume the input string to be lower case
+		return true
+	}
+	return false
+}
+
+func isASCIIDigit(c byte) bool {
+	return c >= '0' && c <= '9'
+}
+
+// IsUUID returns true if the string matches a UUID (in any version, including v6 and v7), upper case is allowed
+func IsUUID(str string) bool {
+	_, err := uuid.Parse(str)
+	return err == nil
+}
+
+const (
+	uuidV3 = 3
+	uuidV4 = 4
+	uuidV5 = 5
+	uuidV7 = 7
+)
+
+// IsUUID3 returns true if the string matches a UUID v3, upper case is allowed
+func IsUUID3(str string) bool {
+	id, err := uuid.Parse(str)
+	return err == nil && id.Version() == uuid.Version(uuidV3)
+}
+
+// IsUUID4 returns true if the string matches a UUID v4, upper case is allowed
+func IsUUID4(str string) bool {
+	id, err := uuid.Parse(str)
+	return err == nil && id.Version() == uuid.Version(uuidV4)
+}
+
+// IsUUID5 returns true if the string matches a UUID v5, upper case is allowed
+func IsUUID5(str string) bool {
+	id, err := uuid.Parse(str)
+	return err == nil && id.Version() == uuid.Version(uuidV5)
+}
+
+// IsUUID7 returns true if the string matches a UUID v7, upper case is allowed
+func IsUUID7(str string) bool {
+	id, err := uuid.Parse(str)
+	return err == nil && id.Version() == uuid.Version(uuidV7)
+}
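
Because the WHATWG-style host parsing above is subtle, here is a hedged sketch (not part of the vendored files) of what these validators should accept and reject. The expected results are read off the documented rules above, not verified output, and the UUID value is made up:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	// Mixed decimal/hex/octal IPv4 parts are accepted, per the WHATWG notes above.
	fmt.Println(strfmt.IsHostname("192.0x00A80001")) // expected true: hex form of 192.168.0.1
	fmt.Println(strfmt.IsHostname("example.com."))   // expected true: trailing dot is allowed
	fmt.Println(strfmt.IsHostname("example.x"))      // expected false: TLD shorter than 2 code points
	fmt.Println(strfmt.IsHostname("[::1]"))          // expected true: bracketed IPv6
	fmt.Println(strfmt.IsHostname("[fe80::1%eth0]")) // expected false: IPv6 zones are rejected

	// Version-specific UUID checks: same string, different version nibble expectations.
	fmt.Println(strfmt.IsUUID4("b6cd8e5a-2546-4b6f-9a3c-0d9a2f0e4a1b")) // expected true
	fmt.Println(strfmt.IsUUID7("b6cd8e5a-2546-4b6f-9a3c-0d9a2f0e4a1b")) // expected false: version is 4
}
```

+
+// IsEmail validates an email address.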
+func IsEmail(str string) bool { + addr, e := mail.ParseAddress(str) + return e == nil && addr.Address != "" +} + +func init() { + // register formats in the default registry: + // - byte + // - creditcard + // - email + // - hexcolor + // - hostname + // - ipv4 + // - ipv6 + // - cidr + // - isbn + // - isbn10 + // - isbn13 + // - mac + // - password + // - rgbcolor + // - ssn + // - uri + // - uuid + // - uuid3 + // - uuid4 + // - uuid5 + // - uuid7 + u := URI("") + Default.Add("uri", &u, isRequestURI) + + eml := Email("") + Default.Add("email", &eml, IsEmail) + + hn := Hostname("") + Default.Add("hostname", &hn, IsHostname) + + ip4 := IPv4("") + Default.Add("ipv4", &ip4, isIPv4) + + ip6 := IPv6("") + Default.Add("ipv6", &ip6, isIPv6) + + cidr := CIDR("") + Default.Add("cidr", &cidr, isCIDR) + + mac := MAC("") + Default.Add("mac", &mac, isMAC) + + uid := UUID("") + Default.Add("uuid", &uid, IsUUID) + + uid3 := UUID3("") + Default.Add("uuid3", &uid3, IsUUID3) + + uid4 := UUID4("") + Default.Add("uuid4", &uid4, IsUUID4) + + uid5 := UUID5("") + Default.Add("uuid5", &uid5, IsUUID5) + + uid7 := UUID7("") + Default.Add("uuid7", &uid7, IsUUID7) + + isbn := ISBN("") + Default.Add("isbn", &isbn, func(str string) bool { return isISBN10(str) || isISBN13(str) }) + + isbn10 := ISBN10("") + Default.Add("isbn10", &isbn10, isISBN10) + + isbn13 := ISBN13("") + Default.Add("isbn13", &isbn13, isISBN13) + + cc := CreditCard("") + Default.Add("creditcard", &cc, isCreditCard) + + ssn := SSN("") + Default.Add("ssn", &ssn, isSSN) + + hc := HexColor("") + Default.Add("hexcolor", &hc, isHexcolor) + + rc := RGBColor("") + Default.Add("rgbcolor", &rc, isRGBcolor) + + b64 := Base64([]byte(nil)) + Default.Add("byte", &b64, isBase64) + + pw := Password("") + Default.Add("password", &pw, func(_ string) bool { return true }) +} + +// Base64 represents a base64 encoded string, using URLEncoding alphabet +// +// swagger:strfmt byte +type Base64 []byte + +// MarshalText turns this instance into text +func (b Base64) MarshalText() ([]byte, error) { + enc := base64.URLEncoding + src := []byte(b) + buf := make([]byte, enc.EncodedLen(len(src))) + enc.Encode(buf, src) + return buf, nil +} + +// UnmarshalText hydrates this instance from text +func (b *Base64) UnmarshalText(data []byte) error { // validation is performed later on + enc := base64.URLEncoding + dbuf := make([]byte, enc.DecodedLen(len(data))) + + n, err := enc.Decode(dbuf, data) + if err != nil { + return err + } + + *b = dbuf[:n] + return nil +} + +// Scan read a value from a database driver +func (b *Base64) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + dbuf := make([]byte, base64.StdEncoding.DecodedLen(len(v))) + n, err := base64.StdEncoding.Decode(dbuf, v) + if err != nil { + return err + } + *b = dbuf[:n] + case string: + vv, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err + } + *b = Base64(vv) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.Base64 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (b Base64) Value() (driver.Value, error) { + return driver.Value(b.String()), nil +} + +func (b Base64) String() string { + return base64.StdEncoding.EncodeToString([]byte(b)) +} + +// MarshalJSON returns the Base64 as JSON +func (b Base64) MarshalJSON() ([]byte, error) { + return json.Marshal(b.String()) +} + +// UnmarshalJSON sets the Base64 from JSON +func (b *Base64) UnmarshalJSON(data []byte) error { + var b64str string + if err := 
json.Unmarshal(data, &b64str); err != nil { + return err + } + vb, err := base64.StdEncoding.DecodeString(b64str) + if err != nil { + return err + } + *b = Base64(vb) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (b *Base64) DeepCopyInto(out *Base64) { + *out = *b +} + +// DeepCopy copies the receiver into a new Base64. +func (b *Base64) DeepCopy() *Base64 { + if b == nil { + return nil + } + out := new(Base64) + b.DeepCopyInto(out) + return out +} + +// URI represents the uri string format as specified by the json schema spec +// +// swagger:strfmt uri +type URI string + +// MarshalText turns this instance into text +func (u URI) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *URI) UnmarshalText(data []byte) error { // validation is performed later on + *u = URI(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *URI) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = URI(string(v)) + case string: + *u = URI(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u URI) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u URI) String() string { + return string(u) +} + +// MarshalJSON returns the URI as JSON +func (u URI) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the URI from JSON +func (u *URI) UnmarshalJSON(data []byte) error { + var uristr string + if err := json.Unmarshal(data, &uristr); err != nil { + return err + } + *u = URI(uristr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *URI) DeepCopyInto(out *URI) { + *out = *u +} + +// DeepCopy copies the receiver into a new URI. +func (u *URI) DeepCopy() *URI { + if u == nil { + return nil + } + out := new(URI) + u.DeepCopyInto(out) + return out +} + +// Email represents the email string format as specified by the json schema spec +// +// swagger:strfmt email +type Email string + +// MarshalText turns this instance into text +func (e Email) MarshalText() ([]byte, error) { + return []byte(string(e)), nil +} + +// UnmarshalText hydrates this instance from text +func (e *Email) UnmarshalText(data []byte) error { // validation is performed later on + *e = Email(string(data)) + return nil +} + +// Scan read a value from a database driver +func (e *Email) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *e = Email(string(v)) + case string: + *e = Email(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.Email from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (e Email) Value() (driver.Value, error) { + return driver.Value(string(e)), nil +} + +func (e Email) String() string { + return string(e) +} + +// MarshalJSON returns the Email as JSON +func (e Email) MarshalJSON() ([]byte, error) { + return json.Marshal(string(e)) +} + +// UnmarshalJSON sets the Email from JSON +func (e *Email) UnmarshalJSON(data []byte) error { + var estr string + if err := json.Unmarshal(data, &estr); err != nil { + return err + } + *e = Email(estr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (e *Email) DeepCopyInto(out *Email) { + *out = *e +} + +// DeepCopy copies the receiver into a new Email. 
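
One detail worth making visible in the `Base64` methods above: `MarshalText` uses the URL-safe alphabet, while `String`, `MarshalJSON`, and `UnmarshalJSON` use the standard one. A minimal sketch, not part of the vendored files, using only the methods shown above:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	// 0xfb 0xff encodes to characters that differ between the two alphabets.
	b := strfmt.Base64([]byte{0xfb, 0xff})

	txt, _ := b.MarshalText()
	fmt.Println(string(txt)) // -_8=  (URL-safe alphabet)

	fmt.Println(b.String()) // +/8=  (standard alphabet)

	js, _ := json.Marshal(b)
	fmt.Println(string(js)) // "+/8="
}
```

The JSON path sticks to the standard alphabet on both ends, while the text path is URL-safe, so mixing the two layers on the same stored value deserves care.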
+func (e *Email) DeepCopy() *Email { + if e == nil { + return nil + } + out := new(Email) + e.DeepCopyInto(out) + return out +} + +// Hostname represents the hostname string format as specified by the json schema spec +// +// swagger:strfmt hostname +type Hostname string + +// MarshalText turns this instance into text +func (h Hostname) MarshalText() ([]byte, error) { + return []byte(string(h)), nil +} + +// UnmarshalText hydrates this instance from text +func (h *Hostname) UnmarshalText(data []byte) error { // validation is performed later on + *h = Hostname(string(data)) + return nil +} + +// Scan read a value from a database driver +func (h *Hostname) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *h = Hostname(string(v)) + case string: + *h = Hostname(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.Hostname from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (h Hostname) Value() (driver.Value, error) { + return driver.Value(string(h)), nil +} + +func (h Hostname) String() string { + return string(h) +} + +// MarshalJSON returns the Hostname as JSON +func (h Hostname) MarshalJSON() ([]byte, error) { + return json.Marshal(string(h)) +} + +// UnmarshalJSON sets the Hostname from JSON +func (h *Hostname) UnmarshalJSON(data []byte) error { + var hstr string + if err := json.Unmarshal(data, &hstr); err != nil { + return err + } + *h = Hostname(hstr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (h *Hostname) DeepCopyInto(out *Hostname) { + *out = *h +} + +// DeepCopy copies the receiver into a new Hostname. +func (h *Hostname) DeepCopy() *Hostname { + if h == nil { + return nil + } + out := new(Hostname) + h.DeepCopyInto(out) + return out +} + +// IPv4 represents an IP v4 address +// +// swagger:strfmt ipv4 +type IPv4 string + +// MarshalText turns this instance into text +func (u IPv4) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *IPv4) UnmarshalText(data []byte) error { // validation is performed later on + *u = IPv4(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *IPv4) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = IPv4(string(v)) + case string: + *u = IPv4(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.IPv4 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u IPv4) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u IPv4) String() string { + return string(u) +} + +// MarshalJSON returns the IPv4 as JSON +func (u IPv4) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the IPv4 from JSON +func (u *IPv4) UnmarshalJSON(data []byte) error { + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = IPv4(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *IPv4) DeepCopyInto(out *IPv4) { + *out = *u +} + +// DeepCopy copies the receiver into a new IPv4. 
+func (u *IPv4) DeepCopy() *IPv4 { + if u == nil { + return nil + } + out := new(IPv4) + u.DeepCopyInto(out) + return out +} + +// IPv6 represents an IP v6 address +// +// swagger:strfmt ipv6 +type IPv6 string + +// MarshalText turns this instance into text +func (u IPv6) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *IPv6) UnmarshalText(data []byte) error { // validation is performed later on + *u = IPv6(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *IPv6) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = IPv6(string(v)) + case string: + *u = IPv6(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.IPv6 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u IPv6) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u IPv6) String() string { + return string(u) +} + +// MarshalJSON returns the IPv6 as JSON +func (u IPv6) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the IPv6 from JSON +func (u *IPv6) UnmarshalJSON(data []byte) error { + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = IPv6(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *IPv6) DeepCopyInto(out *IPv6) { + *out = *u +} + +// DeepCopy copies the receiver into a new IPv6. +func (u *IPv6) DeepCopy() *IPv6 { + if u == nil { + return nil + } + out := new(IPv6) + u.DeepCopyInto(out) + return out +} + +// CIDR represents a Classless Inter-Domain Routing notation +// +// swagger:strfmt cidr +type CIDR string + +// MarshalText turns this instance into text +func (u CIDR) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *CIDR) UnmarshalText(data []byte) error { // validation is performed later on + *u = CIDR(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *CIDR) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = CIDR(string(v)) + case string: + *u = CIDR(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.CIDR from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u CIDR) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u CIDR) String() string { + return string(u) +} + +// MarshalJSON returns the CIDR as JSON +func (u CIDR) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the CIDR from JSON +func (u *CIDR) UnmarshalJSON(data []byte) error { + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = CIDR(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *CIDR) DeepCopyInto(out *CIDR) { + *out = *u +} + +// DeepCopy copies the receiver into a new CIDR. 
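
All of these string-backed types implement `sql.Scanner` and `driver.Valuer` the same way: accept `string` or `[]byte`, reject everything else with `ErrFormat`. A short sketch of the pattern using the `CIDR` type (not part of the vendored files; values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	// database/sql hands drivers either string or []byte; Scan accepts both.
	var c strfmt.CIDR
	if err := c.Scan("192.0.2.0/24"); err != nil {
		panic(err)
	}
	fmt.Println(c) // 192.0.2.0/24

	// The Valuer side stores the value back as a plain string.
	v, _ := c.Value()
	fmt.Printf("%T %v\n", v, v) // string 192.0.2.0/24

	// Unsupported driver types are rejected with an ErrFormat-wrapped error.
	err := c.Scan(42)
	fmt.Println(err != nil) // true
}
```
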
+func (u *CIDR) DeepCopy() *CIDR {
+	if u == nil {
+		return nil
+	}
+	out := new(CIDR)
+	u.DeepCopyInto(out)
+	return out
+}
+
+// MAC represents a 48 bit MAC address
+//
+// swagger:strfmt mac
+type MAC string
+
+// MarshalText turns this instance into text
+func (u MAC) MarshalText() ([]byte, error) {
+	return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *MAC) UnmarshalText(data []byte) error { // validation is performed later on
+	*u = MAC(string(data))
+	return nil
+}
+
+// Scan read a value from a database driver
+func (u *MAC) Scan(raw any) error {
+	switch v := raw.(type) {
+	case []byte:
+		*u = MAC(string(v))
+	case string:
+		*u = MAC(v)
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.MAC from: %#v: %w", v, ErrFormat)
+	}
+
+	return nil
+}
+
+// Value converts a value to a database driver value
+func (u MAC) Value() (driver.Value, error) {
+	return driver.Value(string(u)), nil
+}
+
+func (u MAC) String() string {
+	return string(u)
+}
+
+// MarshalJSON returns the MAC as JSON
+func (u MAC) MarshalJSON() ([]byte, error) {
+	return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the MAC from JSON
+func (u *MAC) UnmarshalJSON(data []byte) error {
+	var ustr string
+	if err := json.Unmarshal(data, &ustr); err != nil {
+		return err
+	}
+	*u = MAC(ustr)
+	return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *MAC) DeepCopyInto(out *MAC) {
+	*out = *u
+}
+
+// DeepCopy copies the receiver into a new MAC.
+func (u *MAC) DeepCopy() *MAC {
+	if u == nil {
+		return nil
+	}
+	out := new(MAC)
+	u.DeepCopyInto(out)
+	return out
+}
+
+// UUID represents a uuid string format
+//
+// swagger:strfmt uuid
+type UUID string
+
+// MarshalText turns this instance into text
+func (u UUID) MarshalText() ([]byte, error) {
+	return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *UUID) UnmarshalText(data []byte) error { // validation is performed later on
+	*u = UUID(string(data))
+	return nil
+}
+
+// Scan read a value from a database driver
+func (u *UUID) Scan(raw any) error {
+	switch v := raw.(type) {
+	case []byte:
+		*u = UUID(string(v))
+	case string:
+		*u = UUID(v)
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.UUID from: %#v: %w", v, ErrFormat)
+	}
+
+	return nil
+}
+
+// Value converts a value to a database driver value
+func (u UUID) Value() (driver.Value, error) {
+	return driver.Value(string(u)), nil
+}
+
+func (u UUID) String() string {
+	return string(u)
+}
+
+// MarshalJSON returns the UUID as JSON
+func (u UUID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the UUID from JSON
+func (u *UUID) UnmarshalJSON(data []byte) error {
+	if string(data) == jsonNull {
+		return nil
+	}
+	var ustr string
+	if err := json.Unmarshal(data, &ustr); err != nil {
+		return err
+	}
+	*u = UUID(ustr)
+	return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *UUID) DeepCopyInto(out *UUID) {
+	*out = *u
+}
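
Two behaviors of `UUID.UnmarshalJSON` above are easy to miss: a JSON `null` is accepted and leaves the previous value untouched, and syntactic validation is deferred to the registry, so even a non-UUID string is stored at this layer. A quick sketch, not part of the vendored files:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	u := strfmt.UUID("3a6ac2c3-20a4-4a8b-9e24-6a1e9e3c5f60")

	// null short-circuits without zeroing the receiver.
	if err := json.Unmarshal([]byte(`null`), &u); err != nil {
		panic(err)
	}
	fmt.Println(u) // 3a6ac2c3-20a4-4a8b-9e24-6a1e9e3c5f60

	// A quoted string replaces the value; validation happens separately.
	_ = json.Unmarshal([]byte(`"not-a-uuid"`), &u)
	fmt.Println(u, strfmt.IsUUID(string(u))) // not-a-uuid false
}
```

+
+// DeepCopy copies the receiver into a new UUID.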
+func (u *UUID) DeepCopy() *UUID { + if u == nil { + return nil + } + out := new(UUID) + u.DeepCopyInto(out) + return out +} + +// UUID3 represents a uuid3 string format +// +// swagger:strfmt uuid3 +type UUID3 string + +// MarshalText turns this instance into text +func (u UUID3) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *UUID3) UnmarshalText(data []byte) error { // validation is performed later on + *u = UUID3(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *UUID3) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = UUID3(string(v)) + case string: + *u = UUID3(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.UUID3 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u UUID3) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u UUID3) String() string { + return string(u) +} + +// MarshalJSON returns the UUID as JSON +func (u UUID3) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the UUID from JSON +func (u *UUID3) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = UUID3(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *UUID3) DeepCopyInto(out *UUID3) { + *out = *u +} + +// DeepCopy copies the receiver into a new UUID3. +func (u *UUID3) DeepCopy() *UUID3 { + if u == nil { + return nil + } + out := new(UUID3) + u.DeepCopyInto(out) + return out +} + +// UUID4 represents a uuid4 string format +// +// swagger:strfmt uuid4 +type UUID4 string + +// MarshalText turns this instance into text +func (u UUID4) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *UUID4) UnmarshalText(data []byte) error { // validation is performed later on + *u = UUID4(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *UUID4) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = UUID4(string(v)) + case string: + *u = UUID4(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.UUID4 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u UUID4) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u UUID4) String() string { + return string(u) +} + +// MarshalJSON returns the UUID as JSON +func (u UUID4) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the UUID from JSON +func (u *UUID4) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = UUID4(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *UUID4) DeepCopyInto(out *UUID4) { + *out = *u +} + +// DeepCopy copies the receiver into a new UUID4. 
+func (u *UUID4) DeepCopy() *UUID4 { + if u == nil { + return nil + } + out := new(UUID4) + u.DeepCopyInto(out) + return out +} + +// UUID5 represents a uuid5 string format +// +// swagger:strfmt uuid5 +type UUID5 string + +// MarshalText turns this instance into text +func (u UUID5) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *UUID5) UnmarshalText(data []byte) error { // validation is performed later on + *u = UUID5(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *UUID5) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = UUID5(string(v)) + case string: + *u = UUID5(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.UUID5 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u UUID5) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u UUID5) String() string { + return string(u) +} + +// MarshalJSON returns the UUID as JSON +func (u UUID5) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the UUID from JSON +func (u *UUID5) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = UUID5(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *UUID5) DeepCopyInto(out *UUID5) { + *out = *u +} + +// DeepCopy copies the receiver into a new UUID5. +func (u *UUID5) DeepCopy() *UUID5 { + if u == nil { + return nil + } + out := new(UUID5) + u.DeepCopyInto(out) + return out +} + +// UUID7 represents a uuid7 string format +// +// swagger:strfmt uuid7 +type UUID7 string + +// MarshalText turns this instance into text +func (u UUID7) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *UUID7) UnmarshalText(data []byte) error { // validation is performed later on + *u = UUID7(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *UUID7) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = UUID7(string(v)) + case string: + *u = UUID7(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.UUID7 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u UUID7) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u UUID7) String() string { + return string(u) +} + +// MarshalJSON returns the UUID as JSON +func (u UUID7) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the UUID from JSON +func (u *UUID7) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = UUID7(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *UUID7) DeepCopyInto(out *UUID7) { + *out = *u +} + +// DeepCopy copies the receiver into a new UUID7. 
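
For producing values that actually satisfy these version-specific checks, `github.com/google/uuid` (the validator backend imported in default.go above) can generate them. A sketch under that assumption, not part of the vendored files:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/google/uuid"
)

func main() {
	// v5: name-based (SHA-1), deterministic for a namespace+name pair.
	v5 := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	fmt.Println(v5, strfmt.IsUUID5(v5.String())) // <uuid> true

	// v7: time-ordered.
	v7, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	fmt.Println(strfmt.IsUUID7(v7.String()), strfmt.IsUUID4(v7.String())) // true false
}
```
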
+func (u *UUID7) DeepCopy() *UUID7 { + if u == nil { + return nil + } + out := new(UUID7) + u.DeepCopyInto(out) + return out +} + +// ISBN represents an isbn string format +// +// swagger:strfmt isbn +type ISBN string + +// MarshalText turns this instance into text +func (u ISBN) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *ISBN) UnmarshalText(data []byte) error { // validation is performed later on + *u = ISBN(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *ISBN) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = ISBN(string(v)) + case string: + *u = ISBN(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.ISBN from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u ISBN) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u ISBN) String() string { + return string(u) +} + +// MarshalJSON returns the ISBN as JSON +func (u ISBN) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the ISBN from JSON +func (u *ISBN) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = ISBN(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *ISBN) DeepCopyInto(out *ISBN) { + *out = *u +} + +// DeepCopy copies the receiver into a new ISBN. +func (u *ISBN) DeepCopy() *ISBN { + if u == nil { + return nil + } + out := new(ISBN) + u.DeepCopyInto(out) + return out +} + +// ISBN10 represents an isbn 10 string format +// +// swagger:strfmt isbn10 +type ISBN10 string + +// MarshalText turns this instance into text +func (u ISBN10) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *ISBN10) UnmarshalText(data []byte) error { // validation is performed later on + *u = ISBN10(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *ISBN10) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = ISBN10(string(v)) + case string: + *u = ISBN10(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.ISBN10 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u ISBN10) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u ISBN10) String() string { + return string(u) +} + +// MarshalJSON returns the ISBN10 as JSON +func (u ISBN10) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the ISBN10 from JSON +func (u *ISBN10) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = ISBN10(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *ISBN10) DeepCopyInto(out *ISBN10) { + *out = *u +} + +// DeepCopy copies the receiver into a new ISBN10. 
+func (u *ISBN10) DeepCopy() *ISBN10 { + if u == nil { + return nil + } + out := new(ISBN10) + u.DeepCopyInto(out) + return out +} + +// ISBN13 represents an isbn 13 string format +// +// swagger:strfmt isbn13 +type ISBN13 string + +// MarshalText turns this instance into text +func (u ISBN13) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *ISBN13) UnmarshalText(data []byte) error { // validation is performed later on + *u = ISBN13(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *ISBN13) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = ISBN13(string(v)) + case string: + *u = ISBN13(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.ISBN13 from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u ISBN13) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u ISBN13) String() string { + return string(u) +} + +// MarshalJSON returns the ISBN13 as JSON +func (u ISBN13) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the ISBN13 from JSON +func (u *ISBN13) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = ISBN13(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *ISBN13) DeepCopyInto(out *ISBN13) { + *out = *u +} + +// DeepCopy copies the receiver into a new ISBN13. +func (u *ISBN13) DeepCopy() *ISBN13 { + if u == nil { + return nil + } + out := new(ISBN13) + u.DeepCopyInto(out) + return out +} + +// CreditCard represents a credit card string format +// +// swagger:strfmt creditcard +type CreditCard string + +// MarshalText turns this instance into text +func (u CreditCard) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *CreditCard) UnmarshalText(data []byte) error { // validation is performed later on + *u = CreditCard(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *CreditCard) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = CreditCard(string(v)) + case string: + *u = CreditCard(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.CreditCard from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u CreditCard) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u CreditCard) String() string { + return string(u) +} + +// MarshalJSON returns the CreditCard as JSON +func (u CreditCard) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the CreditCard from JSON +func (u *CreditCard) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = CreditCard(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *CreditCard) DeepCopyInto(out *CreditCard) { + *out = *u +} + +// DeepCopy copies the receiver into a new CreditCard. 
+func (u *CreditCard) DeepCopy() *CreditCard { + if u == nil { + return nil + } + out := new(CreditCard) + u.DeepCopyInto(out) + return out +} + +// SSN represents a social security string format +// +// swagger:strfmt ssn +type SSN string + +// MarshalText turns this instance into text +func (u SSN) MarshalText() ([]byte, error) { + return []byte(string(u)), nil +} + +// UnmarshalText hydrates this instance from text +func (u *SSN) UnmarshalText(data []byte) error { // validation is performed later on + *u = SSN(string(data)) + return nil +} + +// Scan read a value from a database driver +func (u *SSN) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *u = SSN(string(v)) + case string: + *u = SSN(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.SSN from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (u SSN) Value() (driver.Value, error) { + return driver.Value(string(u)), nil +} + +func (u SSN) String() string { + return string(u) +} + +// MarshalJSON returns the SSN as JSON +func (u SSN) MarshalJSON() ([]byte, error) { + return json.Marshal(string(u)) +} + +// UnmarshalJSON sets the SSN from JSON +func (u *SSN) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *u = SSN(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (u *SSN) DeepCopyInto(out *SSN) { + *out = *u +} + +// DeepCopy copies the receiver into a new SSN. +func (u *SSN) DeepCopy() *SSN { + if u == nil { + return nil + } + out := new(SSN) + u.DeepCopyInto(out) + return out +} + +// HexColor represents a hex color string format +// +// swagger:strfmt hexcolor +type HexColor string + +// MarshalText turns this instance into text +func (h HexColor) MarshalText() ([]byte, error) { + return []byte(string(h)), nil +} + +// UnmarshalText hydrates this instance from text +func (h *HexColor) UnmarshalText(data []byte) error { // validation is performed later on + *h = HexColor(string(data)) + return nil +} + +// Scan read a value from a database driver +func (h *HexColor) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *h = HexColor(string(v)) + case string: + *h = HexColor(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.HexColor from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (h HexColor) Value() (driver.Value, error) { + return driver.Value(string(h)), nil +} + +func (h HexColor) String() string { + return string(h) +} + +// MarshalJSON returns the HexColor as JSON +func (h HexColor) MarshalJSON() ([]byte, error) { + return json.Marshal(string(h)) +} + +// UnmarshalJSON sets the HexColor from JSON +func (h *HexColor) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *h = HexColor(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (h *HexColor) DeepCopyInto(out *HexColor) { + *out = *h +} + +// DeepCopy copies the receiver into a new HexColor. 
+func (h *HexColor) DeepCopy() *HexColor { + if h == nil { + return nil + } + out := new(HexColor) + h.DeepCopyInto(out) + return out +} + +// RGBColor represents a RGB color string format +// +// swagger:strfmt rgbcolor +type RGBColor string + +// MarshalText turns this instance into text +func (r RGBColor) MarshalText() ([]byte, error) { + return []byte(string(r)), nil +} + +// UnmarshalText hydrates this instance from text +func (r *RGBColor) UnmarshalText(data []byte) error { // validation is performed later on + *r = RGBColor(string(data)) + return nil +} + +// Scan read a value from a database driver +func (r *RGBColor) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *r = RGBColor(string(v)) + case string: + *r = RGBColor(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.RGBColor from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (r RGBColor) Value() (driver.Value, error) { + return driver.Value(string(r)), nil +} + +func (r RGBColor) String() string { + return string(r) +} + +// MarshalJSON returns the RGBColor as JSON +func (r RGBColor) MarshalJSON() ([]byte, error) { + return json.Marshal(string(r)) +} + +// UnmarshalJSON sets the RGBColor from JSON +func (r *RGBColor) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *r = RGBColor(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (r *RGBColor) DeepCopyInto(out *RGBColor) { + *out = *r +} + +// DeepCopy copies the receiver into a new RGBColor. +func (r *RGBColor) DeepCopy() *RGBColor { + if r == nil { + return nil + } + out := new(RGBColor) + r.DeepCopyInto(out) + return out +} + +// Password represents a password. +// This has no validations and is mainly used as a marker for UI components. +// +// swagger:strfmt password +type Password string + +// MarshalText turns this instance into text +func (r Password) MarshalText() ([]byte, error) { + return []byte(string(r)), nil +} + +// UnmarshalText hydrates this instance from text +func (r *Password) UnmarshalText(data []byte) error { // validation is performed later on + *r = Password(string(data)) + return nil +} + +// Scan read a value from a database driver +func (r *Password) Scan(raw any) error { + switch v := raw.(type) { + case []byte: + *r = Password(string(v)) + case string: + *r = Password(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.Password from: %#v: %w", v, ErrFormat) + } + + return nil +} + +// Value converts a value to a database driver value +func (r Password) Value() (driver.Value, error) { + return driver.Value(string(r)), nil +} + +func (r Password) String() string { + return string(r) +} + +// MarshalJSON returns the Password as JSON +func (r Password) MarshalJSON() ([]byte, error) { + return json.Marshal(string(r)) +} + +// UnmarshalJSON sets the Password from JSON +func (r *Password) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *r = Password(ustr) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (r *Password) DeepCopyInto(out *Password) { + *out = *r +} + +// DeepCopy copies the receiver into a new Password. 
+func (r *Password) DeepCopy() *Password {
+	if r == nil {
+		return nil
+	}
+	out := new(Password)
+	r.DeepCopyInto(out)
+	return out
+}
+
+func isRequestURI(rawurl string) bool {
+	_, err := url.ParseRequestURI(rawurl)
+	return err == nil
+}
+
+// isIPv4 checks if the string is an IP version 4.
+func isIPv4(str string) bool {
+	ip := net.ParseIP(str)
+	return ip != nil && strings.Contains(str, ".")
+}
+
+// isIPv6 checks if the string is an IP version 6.
+func isIPv6(str string) bool {
+	ip := net.ParseIP(str)
+	return ip != nil && strings.Contains(str, ":")
+}
+
+// isCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6)
+func isCIDR(str string) bool {
+	_, _, err := net.ParseCIDR(str)
+	return err == nil
+}
+
+// isMAC checks if a string is a valid MAC address.
+// Possible MAC formats:
+// 01:23:45:67:89:ab
+// 01:23:45:67:89:ab:cd:ef
+// 01-23-45-67-89-ab
+// 01-23-45-67-89-ab-cd-ef
+// 0123.4567.89ab
+// 0123.4567.89ab.cdef
+func isMAC(str string) bool {
+	_, err := net.ParseMAC(str)
+	return err == nil
+}
+
+// isISBN checks if the string is an ISBN (version 10 or 13).
+// If the version value is not equal to 10 or 13, both variants are checked.
+func isISBN(str string, version int) bool {
+	sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
+	var checksum int32
+	var i int32
+
+	switch version {
+	case isbnVersion10:
+		if !rxISBN10.MatchString(sanitized) {
+			return false
+		}
+		for i = range isbnVersion10 - 1 {
+			checksum += (i + 1) * int32(sanitized[i]-'0')
+		}
+		if sanitized[isbnVersion10-1] == 'X' {
+			checksum += isbnVersion10 * isbnVersion10
+		} else {
+			checksum += isbnVersion10 * int32(sanitized[isbnVersion10-1]-'0')
+		}
+		if checksum%(isbnVersion10+1) == 0 {
+			return true
+		}
+		return false
+	case isbnVersion13:
+		if !rxISBN13.MatchString(sanitized) {
+			return false
+		}
+		factor := []int32{1, 3}
+		for i = range isbnVersion13 - 1 {
+			checksum += factor[i%2] * int32(sanitized[i]-'0')
+		}
+		return (int32(sanitized[isbnVersion13-1]-'0'))-((decimalBase-(checksum%decimalBase))%decimalBase) == 0
+	default:
+		return isISBN(str, isbnVersion10) || isISBN(str, isbnVersion13)
+	}
+}
+
+// isISBN10 checks if the string is an ISBN version 10.
+func isISBN10(str string) bool {
+	return isISBN(str, isbnVersion10)
+}
+
+// isISBN13 checks if the string is an ISBN version 13.
+func isISBN13(str string) bool {
+	return isISBN(str, isbnVersion13)
+}
+
+// isCreditCard checks if the string is a credit card.
+func isCreditCard(str string) bool {
+	sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
+	if !rxCreditCard.MatchString(sanitized) {
+		return false
+	}
+
+	number, err := strconv.ParseInt(sanitized, 0, 64)
+	if err != nil {
+		return false
+	}
+	number, lastDigit := number/decimalBase, number%decimalBase
+
+	var sum int64
+	for i := 0; number > 0; i++ {
+		digit := number % decimalBase
+
+		if i%2 == 0 {
+			digit *= 2
+			if digit > decimalBase-1 {
+				digit -= decimalBase - 1
+			}
+		}
+
+		sum += digit
+		number /= decimalBase
+	}
+
+	return (sum+lastDigit)%decimalBase == 0
+}
+
+// isSSN will validate the given string as a U.S. Social Security Number
+func isSSN(str string) bool {
+	if str == "" || len(str) != 11 {
+		return false
+	}
+	return rxSSN.MatchString(str)
+}
+
+// isHexcolor checks if the string is a hexadecimal color.
+func isHexcolor(str string) bool {
+	return rxHexcolor.MatchString(str)
+}
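The isISBN and isCreditCard helpers above are compact implementations of the classic ISBN and Luhn checksums. As an illustration only (this standalone sketch is not part of the vendored package; the sample values are the well-known ISBN and Luhn test numbers, not real data), the same arithmetic can be reproduced with plain stdlib code:

```go
package main

import "fmt"

// isbn10Valid reproduces the ISBN-10 rule: the weighted sum
// 1*d1 + 2*d2 + ... + 9*d9 + 10*check must be divisible by 11.
func isbn10Valid(digits string) bool {
	var sum int
	for i := 0; i < 9; i++ {
		sum += (i + 1) * int(digits[i]-'0')
	}
	if digits[9] == 'X' { // 'X' stands for a check value of 10
		sum += 10 * 10
	} else {
		sum += 10 * int(digits[9]-'0')
	}
	return sum%11 == 0
}

// isbn13Valid reproduces the ISBN-13 rule: the first 12 digits are weighted
// 1,3,1,3,... and the check digit is (10 - sum mod 10) mod 10.
func isbn13Valid(digits string) bool {
	var sum int
	for i := 0; i < 12; i++ {
		if i%2 == 0 {
			sum += int(digits[i] - '0')
		} else {
			sum += 3 * int(digits[i]-'0')
		}
	}
	return int(digits[12]-'0') == (10-sum%10)%10
}

// luhnValid reproduces the credit card check: starting from the digit left
// of the check digit, every second digit is doubled (minus 9 on overflow).
func luhnValid(digits string) bool {
	var sum int
	double := false
	for i := len(digits) - 1; i >= 0; i-- {
		d := int(digits[i] - '0')
		if double {
			d *= 2
			if d > 9 {
				d -= 9
			}
		}
		sum += d
		double = !double
	}
	return sum%10 == 0
}

func main() {
	fmt.Println(isbn10Valid("0306406152"))    // true
	fmt.Println(isbn13Valid("9780306406157")) // true
	fmt.Println(luhnValid("79927398713"))     // true (standard Luhn test number)
}
```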
+
+// isRGBcolor checks if the string is a valid RGB color in form rgb(RRR, GGG, BBB).
+func isRGBcolor(str string) bool {
+	return rxRGBcolor.MatchString(str)
+}
+
+// isBase64 checks if a string is base64 encoded.
+func isBase64(str string) bool {
+	_, err := base64.StdEncoding.DecodeString(str)
+
+	return err == nil
+}
diff --git a/vendor/github.com/go-openapi/strfmt/doc.go b/vendor/github.com/go-openapi/strfmt/doc.go
new file mode 100644
index 00000000000..5825b72108e
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/doc.go
@@ -0,0 +1,7 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+// Package strfmt contains custom string formats
+//
+// TODO: add info on how to define and register a custom format
+package strfmt
diff --git a/vendor/github.com/go-openapi/strfmt/duration.go b/vendor/github.com/go-openapi/strfmt/duration.go
new file mode 100644
index 00000000000..908c1b02f3c
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/duration.go
@@ -0,0 +1,206 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"fmt"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func init() {
+	d := Duration(0)
+	// register this format in the default registry
+	Default.Add("duration", &d, IsDuration)
+}
+
+const (
+	hoursInDay = 24
+	daysInWeek = 7
+)
+
+var (
+	timeUnits = [][]string{
+		{"ns", "nano"},
+		{"us", "µs", "micro"},
+		{"ms", "milli"},
+		{"s", "sec"},
+		{"m", "min"},
+		{"h", "hr", "hour"},
+		{"d", "day"},
+		{"w", "wk", "week"},
+	}
+
+	timeMultiplier = map[string]time.Duration{
+		"ns": time.Nanosecond,
+		"us": time.Microsecond,
+		"ms": time.Millisecond,
+		"s":  time.Second,
+		"m":  time.Minute,
+		"h":  time.Hour,
+		"d":  hoursInDay * time.Hour,
+		"w":  hoursInDay * daysInWeek * time.Hour,
+	}
+
+	durationMatcher = regexp.MustCompile(`^(((?:-\s?)?\d+)(\.\d+)?\s*([A-Za-zµ]+))`)
+)
+
+// IsDuration returns true if the provided string is a valid duration
+func IsDuration(str string) bool {
+	_, err := ParseDuration(str)
+	return err == nil
+}
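ParseDuration below first defers to time.ParseDuration, then falls back to the unit table above, which adds day ("d") and week ("w") units plus spelled-out variants. A hedged usage sketch, assuming the vendored package is imported under its canonical path github.com/go-openapi/strfmt:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	// Plain Go durations still work via the time.ParseDuration fast path.
	d, _ := strfmt.ParseDuration("1h30m")
	fmt.Println(d) // 1h30m0s

	// Extended units come from the timeUnits table: "d" is 24h, "w" is 7 days.
	d, _ = strfmt.ParseDuration("2 days")
	fmt.Println(d) // 48h0m0s

	// Decimal values are scaled through the divisor logic in ParseDuration.
	d, _ = strfmt.ParseDuration("1.5w")
	fmt.Println(d) // 252h0m0s

	fmt.Println(strfmt.IsDuration("3 weeks")) // true
}
```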
+
+// Duration represents a duration
+//
+// Duration stores a period of time as a nanosecond count, with the largest
+// representable duration being approximately 290 years.
+//
+// swagger:strfmt duration
+type Duration time.Duration
+
+// MarshalText turns this instance into text
+func (d Duration) MarshalText() ([]byte, error) {
+	return []byte(time.Duration(d).String()), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (d *Duration) UnmarshalText(data []byte) error { // validation is performed later on
+	dd, err := ParseDuration(string(data))
+	if err != nil {
+		return err
+	}
+	*d = Duration(dd)
+	return nil
+}
+
+// ParseDuration parses a duration from a string, compatible with Scala duration syntax
+func ParseDuration(cand string) (time.Duration, error) {
+	if dur, err := time.ParseDuration(cand); err == nil {
+		return dur, nil
+	}
+
+	var dur time.Duration
+	ok := false
+	const expectGroups = 4
+	for _, match := range durationMatcher.FindAllStringSubmatch(cand, -1) {
+		if len(match) < expectGroups {
+			continue
+		}
+
+		// remove possible leading - and spaces
+		value, negative := strings.CutPrefix(match[2], "-")
+
+		// if the duration contains a decimal separator, determine a dividing factor
+		const neutral = 1.0
+		divisor := neutral
+		decimal, hasDecimal := strings.CutPrefix(match[3], ".")
+		if hasDecimal {
+			divisor = math.Pow10(len(decimal))
+			value += decimal // consider the value as an integer: will change units later on
+		}
+
+		// if the string is a valid duration, parse it
+		factor, err := strconv.Atoi(strings.TrimSpace(value)) // converts string to int
+		if err != nil {
+			return 0, err
+		}
+
+		if negative {
+			factor = -factor
+		}
+
+		unit := strings.ToLower(strings.TrimSpace(match[4]))
+
+		for _, variants := range timeUnits {
+			last := len(variants) - 1
+			multiplier := timeMultiplier[variants[0]]
+
+			for i, variant := range variants {
+				if (last == i && strings.HasPrefix(unit, variant)) || strings.EqualFold(variant, unit) {
+					ok = true
+					if divisor != neutral {
+						multiplier = time.Duration(float64(multiplier) / divisor) // convert to duration only after having reduced the scale
+					}
+					dur += (time.Duration(factor) * multiplier)
+				}
+			}
+		}
+	}
+
+	if ok {
+		return dur, nil
+	}
+	return 0, fmt.Errorf("unable to parse %s as duration: %w", cand, ErrFormat)
+}
+
+// Scan reads a Duration value from database driver type.
+func (d *Duration) Scan(raw any) error {
+	switch v := raw.(type) {
+	// TODO: case []byte: // ?
+	case int64:
+		*d = Duration(v)
+	case float64:
+		*d = Duration(int64(v))
+	case nil:
+		*d = Duration(0)
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.Duration from: %#v: %w", v, ErrFormat)
+	}
+
+	return nil
+}
+
+// Value converts Duration to a primitive value ready to be written to a database.
+func (d Duration) Value() (driver.Value, error) {
+	return driver.Value(int64(d)), nil
+}
+
+// String converts this duration to a string
+func (d Duration) String() string {
+	return time.Duration(d).String()
+}
+
+// MarshalJSON returns the Duration as JSON
+func (d Duration) MarshalJSON() ([]byte, error) {
+	return json.Marshal(time.Duration(d).String())
+}
+
+// UnmarshalJSON sets the Duration from JSON
+func (d *Duration) UnmarshalJSON(data []byte) error {
+	if string(data) == jsonNull {
+		return nil
+	}
+
+	var dstr string
+	if err := json.Unmarshal(data, &dstr); err != nil {
+		return err
+	}
+	tt, err := ParseDuration(dstr)
+	if err != nil {
+		return err
+	}
+	*d = Duration(tt)
+	return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (d *Duration) DeepCopyInto(out *Duration) {
+	*out = *d
+}
+
+// DeepCopy copies the receiver into a new Duration.
+func (d *Duration) DeepCopy() *Duration {
+	if d == nil {
+		return nil
+	}
+	out := new(Duration)
+	d.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/go-openapi/strfmt/errors.go b/vendor/github.com/go-openapi/strfmt/errors.go
new file mode 100644
index 00000000000..9faa37cf2e5
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/errors.go
@@ -0,0 +1,13 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package strfmt
+
+type strfmtError string
+
+// ErrFormat is an error raised by the strfmt package
+const ErrFormat strfmtError = "format error"
+
+func (e strfmtError) Error() string {
+	return string(e)
+}
diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go
new file mode 100644
index 00000000000..d9d9e04c208
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/format.go
@@ -0,0 +1,297 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package strfmt
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+	"slices"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-viper/mapstructure/v2"
+)
+
+// Default is the default formats registry
+var Default = NewSeededFormats(nil, nil)
+
+// Validator represents a validator for a string format.
+type Validator func(string) bool
+
+// NewFormats creates a new formats registry seeded with the values from the default
+func NewFormats() Registry {
+	//nolint:forcetypeassert
+	return NewSeededFormats(Default.(*defaultFormats).data, nil)
+}
+
+// NewSeededFormats creates a new formats registry
+func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry {
+	if normalizer == nil {
+		normalizer = DefaultNameNormalizer
+	}
+	// copy here, don't modify the original
+	return &defaultFormats{
+		data:          slices.Clone(seeds),
+		normalizeName: normalizer,
+	}
+}
+
+type knownFormat struct {
+	Name      string
+	OrigName  string
+	Type      reflect.Type
+	Validator Validator
+}
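The registry resolves format names through a NameNormalizer, so spellings such as "date-time" and "datetime" can address the same entry. A hedged sketch of supplying a custom normalizer to NewSeededFormats and registering a custom format on top of it; the lowercasing/underscore-stripping policy and the "Corp-Email" format are illustrative assumptions, not upstream behavior:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/go-openapi/strfmt"
)

func main() {
	// Start from an empty seed and normalize names by lowercasing and
	// stripping dashes and underscores (illustrative policy only).
	registry := strfmt.NewSeededFormats(nil, func(name string) string {
		return strings.NewReplacer("-", "", "_", "").Replace(strings.ToLower(name))
	})

	// Register a trivial custom format backed by the Email type.
	e := strfmt.Email("")
	registry.Add("Corp-Email", &e, func(s string) bool {
		return strings.HasSuffix(s, "@example.com")
	})

	// All of these resolve to the same normalized name "corpemail".
	fmt.Println(registry.ContainsName("corp_email"))               // true
	fmt.Println(registry.Validates("CORP-EMAIL", "a@example.com")) // true
}
```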
+
+// NameNormalizer is a function that normalizes a format name.
+type NameNormalizer func(string) string
+
+// DefaultNameNormalizer removes all dashes
+func DefaultNameNormalizer(name string) string {
+	return strings.ReplaceAll(name, "-", "")
+}
+
+type defaultFormats struct {
+	sync.Mutex
+
+	data          []knownFormat
+	normalizeName NameNormalizer
+}
+
+// MapStructureHookFunc is a decode hook function for mapstructure
+func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc {
+	return func(from reflect.Type, to reflect.Type, obj any) (any, error) {
+		if from.Kind() != reflect.String {
+			return obj, nil
+		}
+		data, ok := obj.(string)
+		if !ok {
+			return nil, fmt.Errorf("failed to cast %+v to string: %w", obj, ErrFormat)
+		}
+
+		for _, v := range f.data {
+			tpe, _ := f.GetType(v.Name)
+			if to == tpe {
+				switch v.Name {
+				case "date":
+					d, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation)
+					if err != nil {
+						return nil, err
+					}
+					return Date(d), nil
+				case "datetime":
+					input := data
+					if len(input) == 0 {
+						return nil, fmt.Errorf("empty string is an invalid datetime format: %w", ErrFormat)
+					}
+					return ParseDateTime(input)
+				case "duration":
+					dur, err := ParseDuration(data)
+					if err != nil {
+						return nil, err
+					}
+					return Duration(dur), nil
+				case "uri":
+					return URI(data), nil
+				case "email":
+					return Email(data), nil
+				case "uuid":
+					return UUID(data), nil
+				case "uuid3":
+					return UUID3(data), nil
+				case "uuid4":
+					return UUID4(data), nil
+				case "uuid5":
+					return UUID5(data), nil
+				case "uuid7":
+					return UUID7(data), nil
+				case "hostname":
+					return Hostname(data), nil
+				case "ipv4":
+					return IPv4(data), nil
+				case "ipv6":
+					return IPv6(data), nil
+				case "cidr":
+					return CIDR(data), nil
+				case "mac":
+					return MAC(data), nil
+				case "isbn":
+					return ISBN(data), nil
+				case "isbn10":
+					return ISBN10(data), nil
+				case "isbn13":
+					return ISBN13(data), nil
+				case "creditcard":
+					return CreditCard(data), nil
+				case "ssn":
+					return SSN(data), nil
+				case "hexcolor":
+					return HexColor(data), nil
+				case "rgbcolor":
+					return RGBColor(data), nil
+				case "byte":
+					return Base64(data), nil
+				case "password":
+					return Password(data), nil
+				case "ulid":
+					ulid, err := ParseULID(data)
+					if err != nil {
+						return nil, err
+					}
+					return ulid, nil
+				default:
+					return nil, errors.InvalidTypeName(v.Name)
+				}
+			}
+		}
+		return data, nil
+	}
+}
+
+// Add adds a new format, returning true if this was a new item rather than a replacement
+func (f *defaultFormats) Add(name string, strfmt Format, validator Validator) bool {
+	f.Lock()
+	defer f.Unlock()
+
+	nme := f.normalizeName(name)
+
+	tpe := reflect.TypeOf(strfmt)
+	if tpe.Kind() == reflect.Ptr {
+		tpe = tpe.Elem()
+	}
+
+	for i := range f.data {
+		v := &f.data[i]
+		if v.Name == nme {
+			v.Type = tpe
+			v.Validator = validator
+			return false
+		}
+	}
+
+	// turns out it's new after all
+	f.data = append(f.data, knownFormat{Name: nme, OrigName: name, Type: tpe, Validator: validator})
+	return true
+}
+
+// GetType gets the type for the specified name
+func (f *defaultFormats) GetType(name string) (reflect.Type, bool) {
+	f.Lock()
+	defer f.Unlock()
+	nme := f.normalizeName(name)
+	for _, v := range f.data {
+		if v.Name == nme {
+			return v.Type, true
+		}
+	}
+	return nil, false
+}
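MapStructureHookFunc above is what lets a mapstructure decode turn plain strings into the registered strfmt types. A hedged sketch of wiring it into a decoder, assuming mapstructure/v2 keeps the familiar DecoderConfig/NewDecoder API (the Event struct and field names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/go-viper/mapstructure/v2"
)

type Event struct {
	Name string          `mapstructure:"name"`
	At   strfmt.DateTime `mapstructure:"at"`
}

func main() {
	var out Event
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// The hook converts string inputs into registered strfmt types.
		DecodeHook: strfmt.Default.MapStructureHookFunc(),
		Result:     &out,
	})
	if err != nil {
		panic(err)
	}

	in := map[string]any{
		"name": "deploy",
		"at":   "2012-04-23T18:25:43.511Z",
	}
	if err := dec.Decode(in); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.At) // deploy 2012-04-23T18:25:43.511Z
}
```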
+
+// DelByName removes the format by the specified name, returning true when an item was actually removed
+func (f *defaultFormats) DelByName(name string) bool {
+	f.Lock()
+	defer f.Unlock()
+
+	nme := f.normalizeName(name)
+
+	for i, v := range f.data {
+		if v.Name == nme {
+			f.data[i] = knownFormat{} // release
+			f.data = append(f.data[:i], f.data[i+1:]...)
+			return true
+		}
+	}
+	return false
+}
+
+// DelByFormat removes the specified format, returning true when an item was actually removed
+func (f *defaultFormats) DelByFormat(strfmt Format) bool {
+	f.Lock()
+	defer f.Unlock()
+
+	tpe := reflect.TypeOf(strfmt)
+	if tpe.Kind() == reflect.Ptr {
+		tpe = tpe.Elem()
+	}
+
+	for i, v := range f.data {
+		if v.Type == tpe {
+			f.data[i] = knownFormat{} // release
+			f.data = append(f.data[:i], f.data[i+1:]...)
+			return true
+		}
+	}
+	return false
+}
+
+// ContainsName returns true if this registry contains the specified name
+func (f *defaultFormats) ContainsName(name string) bool {
+	f.Lock()
+	defer f.Unlock()
+	nme := f.normalizeName(name)
+	for _, v := range f.data {
+		if v.Name == nme {
+			return true
+		}
+	}
+	return false
+}
+
+// ContainsFormat returns true if this registry contains the specified format
+func (f *defaultFormats) ContainsFormat(strfmt Format) bool {
+	f.Lock()
+	defer f.Unlock()
+	tpe := reflect.TypeOf(strfmt)
+	if tpe.Kind() == reflect.Ptr {
+		tpe = tpe.Elem()
+	}
+
+	for _, v := range f.data {
+		if v.Type == tpe {
+			return true
+		}
+	}
+	return false
+}
+
+// Validates passed data against the format.
+//
+// Note that the format name is automatically normalized, e.g. one may
+// use "date-time" to use the "datetime" format validator.
+func (f *defaultFormats) Validates(name, data string) bool {
+	f.Lock()
+	defer f.Unlock()
+	nme := f.normalizeName(name)
+	for _, v := range f.data {
+		if v.Name == nme {
+			return v.Validator(data)
+		}
+	}
+	return false
+}
+
+// Parse a string into the appropriate format representation type.
+//
+// E.g. parsing a string as a "date" will return a Date type.
+func (f *defaultFormats) Parse(name, data string) (any, error) {
+	f.Lock()
+	defer f.Unlock()
+	nme := f.normalizeName(name)
+	for _, v := range f.data {
+		if v.Name == nme {
+			nw := reflect.New(v.Type).Interface()
+			if dec, ok := nw.(encoding.TextUnmarshaler); ok {
+				if err := dec.UnmarshalText([]byte(data)); err != nil {
+					return nil, err
+				}
+				return nw, nil
+			}
+			return nil, errors.InvalidTypeName(name)
+		}
+	}
+	return nil, errors.InvalidTypeName(name)
+}
diff --git a/vendor/github.com/go-openapi/strfmt/ifaces.go b/vendor/github.com/go-openapi/strfmt/ifaces.go
new file mode 100644
index 00000000000..1b9e72c64eb
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/ifaces.go
@@ -0,0 +1,32 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package strfmt
+
+import (
+	"encoding"
+	"reflect"
+
+	"github.com/go-viper/mapstructure/v2"
+)
+
+// Format represents a string format.
+//
+// All implementations of Format provide a string representation and text
+// marshaling/unmarshaling interface to be used by encoders (e.g. encoding/json).
+type Format interface {
+	String() string
+	encoding.TextMarshaler
+	encoding.TextUnmarshaler
+}
+
+// Registry is a registry of string formats, with a validation method.
+type Registry interface { + Add(string, Format, Validator) bool + DelByName(string) bool + GetType(string) (reflect.Type, bool) + ContainsName(string) bool + Validates(string, string) bool + Parse(string, string) (any, error) + MapStructureHookFunc() mapstructure.DecodeHookFunc +} diff --git a/vendor/github.com/go-openapi/strfmt/mongo.go b/vendor/github.com/go-openapi/strfmt/mongo.go new file mode 100644 index 00000000000..641fed9b1a6 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/mongo.go @@ -0,0 +1,646 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package strfmt + +import ( + "encoding/base64" + "encoding/binary" + "fmt" + "time" + + "github.com/oklog/ulid" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/bsontype" + bsonprim "go.mongodb.org/mongo-driver/bson/primitive" +) + +var ( + _ bson.Marshaler = Date{} + _ bson.Unmarshaler = &Date{} + _ bson.Marshaler = Base64{} + _ bson.Unmarshaler = &Base64{} + _ bson.Marshaler = Duration(0) + _ bson.Unmarshaler = (*Duration)(nil) + _ bson.Marshaler = DateTime{} + _ bson.Unmarshaler = &DateTime{} + _ bson.Marshaler = ULID{} + _ bson.Unmarshaler = &ULID{} + _ bson.Marshaler = URI("") + _ bson.Unmarshaler = (*URI)(nil) + _ bson.Marshaler = Email("") + _ bson.Unmarshaler = (*Email)(nil) + _ bson.Marshaler = Hostname("") + _ bson.Unmarshaler = (*Hostname)(nil) + _ bson.Marshaler = IPv4("") + _ bson.Unmarshaler = (*IPv4)(nil) + _ bson.Marshaler = IPv6("") + _ bson.Unmarshaler = (*IPv6)(nil) + _ bson.Marshaler = CIDR("") + _ bson.Unmarshaler = (*CIDR)(nil) + _ bson.Marshaler = MAC("") + _ bson.Unmarshaler = (*MAC)(nil) + _ bson.Marshaler = Password("") + _ bson.Unmarshaler = (*Password)(nil) + _ bson.Marshaler = UUID("") + _ bson.Unmarshaler = (*UUID)(nil) + _ bson.Marshaler = UUID3("") + _ bson.Unmarshaler = (*UUID3)(nil) + _ bson.Marshaler = UUID4("") + _ bson.Unmarshaler = (*UUID4)(nil) + _ bson.Marshaler = UUID5("") + _ bson.Unmarshaler = (*UUID5)(nil) + _ bson.Marshaler = UUID7("") + _ bson.Unmarshaler = (*UUID7)(nil) + _ bson.Marshaler = ISBN("") + _ bson.Unmarshaler = (*ISBN)(nil) + _ bson.Marshaler = ISBN10("") + _ bson.Unmarshaler = (*ISBN10)(nil) + _ bson.Marshaler = ISBN13("") + _ bson.Unmarshaler = (*ISBN13)(nil) + _ bson.Marshaler = CreditCard("") + _ bson.Unmarshaler = (*CreditCard)(nil) + _ bson.Marshaler = SSN("") + _ bson.Unmarshaler = (*SSN)(nil) + _ bson.Marshaler = HexColor("") + _ bson.Unmarshaler = (*HexColor)(nil) + _ bson.Marshaler = RGBColor("") + _ bson.Unmarshaler = (*RGBColor)(nil) + _ bson.Marshaler = ObjectId{} + _ bson.Unmarshaler = &ObjectId{} + + _ bson.ValueMarshaler = DateTime{} + _ bson.ValueUnmarshaler = &DateTime{} + _ bson.ValueMarshaler = ObjectId{} + _ bson.ValueUnmarshaler = &ObjectId{} +) + +const ( + millisec = 1000 + microsec = 1_000_000 + bsonDateTimeSize = 8 +) + +func (d Date) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": d.String()}) +} + +func (d *Date) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if data, ok := m["data"].(string); ok { + rd, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) + if err != nil { + return err + } + *d = Date(rd) + return nil + } + + return fmt.Errorf("couldn't unmarshal bson bytes value as Date: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (b Base64) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": 
b.String()})
+}
+
+// UnmarshalBSON document into this value
+func (b *Base64) UnmarshalBSON(data []byte) error {
+	var m bson.M
+	if err := bson.Unmarshal(data, &m); err != nil {
+		return err
+	}
+
+	if bd, ok := m["data"].(string); ok {
+		vb, err := base64.StdEncoding.DecodeString(bd)
+		if err != nil {
+			return err
+		}
+		*b = Base64(vb)
+		return nil
+	}
+	return fmt.Errorf("couldn't unmarshal bson bytes as base64: %w", ErrFormat)
+}
+
+func (d Duration) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(bson.M{"data": d.String()})
+}
+
+func (d *Duration) UnmarshalBSON(data []byte) error {
+	var m bson.M
+	if err := bson.Unmarshal(data, &m); err != nil {
+		return err
+	}
+
+	if data, ok := m["data"].(string); ok {
+		rd, err := ParseDuration(data)
+		if err != nil {
+			return err
+		}
+		*d = Duration(rd)
+		return nil
+	}
+
+	return fmt.Errorf("couldn't unmarshal bson bytes value as Duration: %w", ErrFormat)
+}
+
+// MarshalBSON renders the DateTime as a BSON document
+func (t DateTime) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(bson.M{"data": t})
+}
+
+// UnmarshalBSON reads the DateTime from a BSON document
+func (t *DateTime) UnmarshalBSON(data []byte) error {
+	var obj struct {
+		Data DateTime
+	}
+
+	if err := bson.Unmarshal(data, &obj); err != nil {
+		return err
+	}
+
+	*t = obj.Data
+
+	return nil
+}
+
+// MarshalBSONValue is an interface implemented by types that can marshal themselves
+// into a BSON document represented as bytes. The bytes returned must be a valid
+// BSON document if the error is nil.
+//
+// Marshals a DateTime as a bson.TypeDateTime, an int64 representing
+// milliseconds since epoch.
+func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	// UnixNano cannot be used directly: the result of calling UnixNano on the
+	// zero Time is undefined. That's why we use Unix() and Nanosecond() instead.
+
+	tNorm := NormalizeTimeForMarshal(time.Time(t))
+	i64 := tNorm.Unix()*millisec + int64(tNorm.Nanosecond())/microsec
+	buf := make([]byte, bsonDateTimeSize)
+	binary.LittleEndian.PutUint64(buf, uint64(i64)) //nolint:gosec // it's okay to handle negative int64 this way
+
+	return bson.TypeDateTime, buf, nil
+}
+
+// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The BSON bytes and type can be
+// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
+// wishes to retain the data after returning.
+func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { + if tpe == bson.TypeNull { + *t = DateTime{} + return nil + } + + if len(data) != bsonDateTimeSize { + return fmt.Errorf("bson date field length not exactly %d bytes: %w", bsonDateTimeSize, ErrFormat) + } + + i64 := int64(binary.LittleEndian.Uint64(data)) //nolint:gosec // it's okay if we overflow and get a negative datetime + *t = DateTime(time.Unix(i64/millisec, i64%millisec*microsec)) + + return nil +} + +// MarshalBSON document from this value +func (u ULID) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *ULID) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + id, err := ulid.ParseStrict(ud) + if err != nil { + return fmt.Errorf("couldn't parse bson bytes as ULID: %w: %w", err, ErrFormat) + } + u.ULID = id + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as ULID: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u URI) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *URI) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = URI(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as uri: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (e Email) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": e.String()}) +} + +// UnmarshalBSON document into this value +func (e *Email) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *e = Email(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as email: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (h Hostname) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": h.String()}) +} + +// UnmarshalBSON document into this value +func (h *Hostname) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *h = Hostname(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as hostname: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u IPv4) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *IPv4) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = IPv4(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as ipv4: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u IPv6) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *IPv6) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = IPv6(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as ipv6: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u CIDR) 
MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *CIDR) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = CIDR(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as CIDR: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u MAC) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *MAC) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = MAC(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as MAC: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (r Password) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": r.String()}) +} + +// UnmarshalBSON document into this value +func (r *Password) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *r = Password(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as Password: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u UUID) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *UUID) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = UUID(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as UUID: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u UUID3) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *UUID3) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = UUID3(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as UUID3: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u UUID4) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *UUID4) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = UUID4(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as UUID4: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u UUID5) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *UUID5) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = UUID5(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as UUID5: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u UUID7) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *UUID7) UnmarshalBSON(data []byte) error { + var m bson.M 
+ if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = UUID7(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as UUID7: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u ISBN) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *ISBN) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = ISBN(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as ISBN: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u ISBN10) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *ISBN10) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = ISBN10(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as ISBN10: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u ISBN13) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *ISBN13) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = ISBN13(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as ISBN13: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u CreditCard) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *CreditCard) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = CreditCard(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as CreditCard: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (u SSN) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": u.String()}) +} + +// UnmarshalBSON document into this value +func (u *SSN) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *u = SSN(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as SSN: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (h HexColor) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": h.String()}) +} + +// UnmarshalBSON document into this value +func (h *HexColor) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *h = HexColor(ud) + return nil + } + return fmt.Errorf("couldn't unmarshal bson bytes as HexColor: %w", ErrFormat) +} + +// MarshalBSON document from this value +func (r RGBColor) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": r.String()}) +} + +// UnmarshalBSON document into this value +func (r *RGBColor) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if ud, ok := m["data"].(string); ok { + *r = RGBColor(ud) + return nil + } + return 
fmt.Errorf("couldn't unmarshal bson bytes as RGBColor: %w", ErrFormat)
+}
+
+// MarshalBSON renders the object id as a BSON document
+func (id ObjectId) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)})
+}
+
+// UnmarshalBSON reads the objectId from a BSON document
+func (id *ObjectId) UnmarshalBSON(data []byte) error {
+	var obj struct {
+		Data bsonprim.ObjectID
+	}
+	if err := bson.Unmarshal(data, &obj); err != nil {
+		return err
+	}
+	*id = ObjectId(obj.Data)
+	return nil
+}
+
+// MarshalBSONValue is an interface implemented by types that can marshal themselves
+// into a BSON document represented as bytes. The bytes returned must be a valid
+// BSON document if the error is nil.
+func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	oid := bsonprim.ObjectID(id)
+	return bson.TypeObjectID, oid[:], nil
+}
+
+// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The BSON bytes and type can be
+// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
+// wishes to retain the data after returning.
+func (id *ObjectId) UnmarshalBSONValue(_ bsontype.Type, data []byte) error {
+	var oid bsonprim.ObjectID
+	copy(oid[:], data)
+	*id = ObjectId(oid)
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go
new file mode 100644
index 00000000000..8085aaf6965
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/time.go
@@ -0,0 +1,258 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+	"time"
+)
+
+var (
+	// UnixZero sets the zero unix UTC timestamp we want to compare against.
+	//
+	// Unix 0 for an EST timezone is not equivalent to a UTC timezone.
+	UnixZero = time.Unix(0, 0).UTC()
+)
+
+func init() {
+	dt := DateTime{}
+	Default.Add("datetime", &dt, IsDateTime)
+}
+
+// IsDateTime returns true when the string is a valid date-time.
+//
+// The JSON datetime format consists of a date and a time separated by a "T", e.g. 2012-04-23T18:25:43.511Z.
+func IsDateTime(str string) bool {
+	const (
+		minDateTimeLength = 4
+		minParts          = 2
+	)
+	if len(str) < minDateTimeLength {
+		return false
+	}
+	s := strings.Split(strings.ToLower(str), "t")
+	if len(s) < minParts || !IsDate(s[0]) {
+		return false
+	}
+
+	matches := rxDateTime.FindAllStringSubmatch(s[1], -1)
+	if len(matches) == 0 || len(matches[0]) == 0 {
+		return false
+	}
+	m := matches[0]
+	res := m[1] <= "23" && m[2] <= "59" && m[3] <= "59"
+	return res
+}
+
+const (
+	// RFC3339Millis represents an ISO8601 format to millis instead of to nanos
+	RFC3339Millis = "2006-01-02T15:04:05.000Z07:00"
+	// RFC3339MillisNoColon represents an ISO8601 format to millis instead of to nanos
+	RFC3339MillisNoColon = "2006-01-02T15:04:05.000Z0700"
+	// RFC3339Micro represents an ISO8601 format to micros instead of to nanos
+	RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00"
+	// RFC3339MicroNoColon represents an ISO8601 format to micros instead of to nanos
+	RFC3339MicroNoColon = "2006-01-02T15:04:05.000000Z0700"
+	// ISO8601LocalTime represents an ISO8601 format in local time (no timezone)
+	ISO8601LocalTime = "2006-01-02T15:04:05"
+	// ISO8601TimeWithReducedPrecision represents an ISO8601 format with reduced precision (dropped seconds)
+	ISO8601TimeWithReducedPrecision = "2006-01-02T15:04Z"
+	// ISO8601TimeWithReducedPrecisionLocaltime represents an ISO8601 format with reduced precision and no timezone (dropped seconds + no timezone)
+	ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04"
+	// ISO8601TimeUniversalSortableDateTimePattern represents an ISO8601 universal sortable date time pattern.
+	ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05"
+	// ISO8601TimeUniversalSortableDateTimePatternShortForm is the short form of ISO8601TimeUniversalSortableDateTimePattern
+	ISO8601TimeUniversalSortableDateTimePatternShortForm = "2006-01-02"
+	// DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6
+	DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$`
+)
+
+var (
+	rxDateTime = regexp.MustCompile(DateTimePattern)
+
+	// DateTimeFormats is the collection of formats used by ParseDateTime()
+	DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern, ISO8601TimeUniversalSortableDateTimePatternShortForm}
+
+	// MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds)
+	MarshalFormat = RFC3339Millis
+
+	// NormalizeTimeForMarshal provides a normalization function on time before marshalling (e.g. time.UTC).
+	// By default, the time value is not changed.
+	NormalizeTimeForMarshal = func(t time.Time) time.Time { return t }
+
+	// DefaultTimeLocation provides a location for a time when the time zone is not encoded in the string (ex: ISO8601 Local variants).
+	DefaultTimeLocation = time.UTC
+)
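ParseDateTime, defined just below, simply walks DateTimeFormats in order until one layout accepts the input, falling back to DefaultTimeLocation when the string carries no timezone. A hedged usage sketch (assuming the vendored package is imported as github.com/go-openapi/strfmt):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	// Each input matches a different layout from DateTimeFormats.
	for _, in := range []string{
		"2012-04-23T18:25:43.511Z", // RFC3339Millis
		"2012-04-23T18:25:43",      // ISO8601LocalTime, read in DefaultTimeLocation (UTC)
		"2012-04-23 18:25:43",      // universal sortable pattern
		"2012-04-23",               // short form, midnight UTC
	} {
		dt, err := strfmt.ParseDateTime(in)
		if err != nil {
			panic(err)
		}
		// String() re-renders using MarshalFormat (RFC3339 with milliseconds).
		fmt.Printf("%-26s -> %s\n", in, dt)
	}
}
```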
+
+// ParseDateTime parses a string that represents an ISO8601 time or a unix epoch
+func ParseDateTime(data string) (DateTime, error) {
+	if data == "" {
+		return NewDateTime(), nil
+	}
+	var lastError error
+	for _, layout := range DateTimeFormats {
+		dd, err := time.ParseInLocation(layout, data, DefaultTimeLocation)
+		if err != nil {
+			lastError = err
+			continue
+		}
+		return DateTime(dd), nil
+	}
+	return DateTime{}, lastError
+}
+
+// DateTime is a time but it serializes to ISO8601 format with millis.
+//
+// It knows how to read several variations of an RFC3339 date time.
+// Most APIs we encounter want either millisecond or second precision times.
+// This just tries to make it worry-free.
+//
+// swagger:strfmt date-time
+type DateTime time.Time
+
+// NewDateTime is a representation of the UNIX epoch (January 1, 1970 00:00:00 UTC) for the [DateTime] type.
+//
+// Notice that this is not the zero value of the [DateTime] type.
+//
+// You may use [DateTime.IsUnixZero] to check against this value.
+func NewDateTime() DateTime {
+	return DateTime(time.Unix(0, 0).UTC())
+}
+
+// MakeDateTime is a representation of the zero value of the [DateTime] type (January 1, year 1, 00:00:00 UTC).
+//
+// You may use [DateTime.IsZero] to check against this value.
+func MakeDateTime() DateTime {
+	return DateTime(time.Time{})
+}
+
+// String converts this time to a string
+func (t DateTime) String() string {
+	return NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat)
+}
+
+// IsZero returns whether the date time is a zero value
+func (t DateTime) IsZero() bool {
+	return time.Time(t).IsZero()
+}
+
+// IsUnixZero returns whether the date time is equivalent to time.Unix(0, 0).UTC().
+func (t DateTime) IsUnixZero() bool {
+	return time.Time(t).Equal(UnixZero)
+}
+
+// MarshalText implements the text marshaller interface
+func (t DateTime) MarshalText() ([]byte, error) {
+	return []byte(t.String()), nil
+}
+
+// UnmarshalText implements the text unmarshaller interface
+func (t *DateTime) UnmarshalText(text []byte) error {
+	tt, err := ParseDateTime(string(text))
+	if err != nil {
+		return err
+	}
+	*t = tt
+	return nil
+}
+
+// Scan scans a DateTime value from database driver type.
+func (t *DateTime) Scan(raw any) error {
+	// TODO: case int64: and case float64: ?
+	switch v := raw.(type) {
+	case []byte:
+		return t.UnmarshalText(v)
+	case string:
+		return t.UnmarshalText([]byte(v))
+	case time.Time:
+		*t = DateTime(v)
+	case nil:
+		*t = DateTime{}
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.DateTime from: %#v: %w", v, ErrFormat)
+	}
+
+	return nil
+}
+
+// Value converts DateTime to a primitive value ready to be written to a database.
+func (t DateTime) Value() (driver.Value, error) {
+	return driver.Value(t.String()), nil
+}
+
+// MarshalJSON returns the DateTime as JSON
+func (t DateTime) MarshalJSON() ([]byte, error) {
+	return json.Marshal(NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat))
+}
+
+// UnmarshalJSON sets the DateTime from JSON
+func (t *DateTime) UnmarshalJSON(data []byte) error {
+	if string(data) == jsonNull {
+		return nil
+	}
+
+	var tstr string
+	if err := json.Unmarshal(data, &tstr); err != nil {
+		return err
+	}
+	tt, err := ParseDateTime(tstr)
+	if err != nil {
+		return err
+	}
+	*t = tt
+	return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (t *DateTime) DeepCopyInto(out *DateTime) {
+	*out = *t
+}
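Since MarshalJSON and String both render through MarshalFormat, any parsed value round-trips at millisecond precision. A hedged sketch of that behavior (standalone, not part of the vendored package):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	dt, err := strfmt.ParseDateTime("2012-04-23T18:25:43.511Z")
	if err != nil {
		panic(err)
	}

	// MarshalJSON renders with MarshalFormat, i.e. millisecond precision.
	out, _ := json.Marshal(dt)
	fmt.Println(string(out)) // "2012-04-23T18:25:43.511Z"

	// Nanosecond inputs are truncated to millis on output.
	dt2, _ := strfmt.ParseDateTime("2012-04-23T18:25:43.511888009Z")
	fmt.Println(dt2.String()) // 2012-04-23T18:25:43.511Z

	// "null" is left untouched by UnmarshalJSON; the value keeps its default.
	var dt3 strfmt.DateTime
	_ = json.Unmarshal([]byte("null"), &dt3)
	fmt.Println(dt3.IsZero()) // true
}
```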
+
+// DeepCopy copies the receiver into a new DateTime.
+func (t *DateTime) DeepCopy() *DateTime {
+	if t == nil {
+		return nil
+	}
+	out := new(DateTime)
+	t.DeepCopyInto(out)
+	return out
+}
+
+// GobEncode implements the gob.GobEncoder interface.
+func (t DateTime) GobEncode() ([]byte, error) {
+	return t.MarshalBinary()
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+func (t *DateTime) GobDecode(data []byte) error {
+	return t.UnmarshalBinary(data)
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (t DateTime) MarshalBinary() ([]byte, error) {
+	return NormalizeTimeForMarshal(time.Time(t)).MarshalBinary()
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (t *DateTime) UnmarshalBinary(data []byte) error {
+	var original time.Time
+
+	err := original.UnmarshalBinary(data)
+	if err != nil {
+		return err
+	}
+
+	*t = DateTime(original)
+
+	return nil
+}
+
+// Equal checks if two DateTime instances are equal using time.Time's Equal method
+func (t DateTime) Equal(t2 DateTime) bool {
+	return time.Time(t).Equal(time.Time(t2))
+}
diff --git a/vendor/github.com/go-openapi/strfmt/ulid.go b/vendor/github.com/go-openapi/strfmt/ulid.go
new file mode 100644
index 00000000000..85c5b53e6c7
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/ulid.go
@@ -0,0 +1,208 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package strfmt
+
+import (
+	cryptorand "crypto/rand"
+	"database/sql/driver"
+	"encoding/json"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/oklog/ulid"
+)
+
+// ULID represents a ulid string format
+// ref:
+//
+//	https://github.com/ulid/spec
+//
+// impl:
+//
+//	https://github.com/oklog/ulid
+//
+// swagger:strfmt ulid
type ULID struct {
+	ulid.ULID
+}
+
+var (
+	ulidEntropyPool = sync.Pool{
+		New: func() any {
+			return cryptorand.Reader
+		},
+	}
+
+	ULIDScanDefaultFunc = func(raw any) (ULID, error) {
+		u := NewULIDZero()
+		switch x := raw.(type) {
+		case nil:
+			// zero ulid
+			return u, nil
+		case string:
+			if x == "" {
+				// zero ulid
+				return u, nil
+			}
+			return u, u.UnmarshalText([]byte(x))
+		case []byte:
+			return u, u.UnmarshalText(x)
+		}
+
+		return u, fmt.Errorf("cannot sql.Scan() strfmt.ULID from: %#v: %w", raw, ulid.ErrScanValue)
+	}
+
+	// ULIDScanOverrideFunc allows you to override the Scan method of the ULID type
+	ULIDScanOverrideFunc = ULIDScanDefaultFunc
+
+	ULIDValueDefaultFunc = func(u ULID) (driver.Value, error) {
+		return driver.Value(u.String()), nil
+	}
+
+	// ULIDValueOverrideFunc allows you to override the Value method of the ULID type
+	ULIDValueOverrideFunc = ULIDValueDefaultFunc
+)
+
+func init() {
+	// register formats in the default registry:
+	// - ulid
+	ulid := ULID{}
+	Default.Add("ulid", &ulid, IsULID)
+}
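ULID generation draws entropy from the pooled crypto/rand reader above, and database Scan/Value calls are routed through the override hooks, which default to ULIDScanDefaultFunc and ULIDValueDefaultFunc. A hedged usage sketch (assuming the vendored package is imported as github.com/go-openapi/strfmt):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	// NewULID draws entropy from the pooled crypto/rand reader.
	u, err := strfmt.NewULID()
	if err != nil {
		panic(err)
	}
	fmt.Println(strfmt.IsULID(u.String())) // true

	// Scan goes through ULIDScanOverrideFunc, which defaults to
	// ULIDScanDefaultFunc and accepts strings, []byte, and nil.
	var fromDB strfmt.ULID
	if err := fromDB.Scan(u.String()); err != nil {
		panic(err)
	}
	fmt.Println(fromDB.Equal(u)) // true
}
```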
+
+// IsULID checks if the provided string is in ULID format.
+// Note that this function considers an overflowed ULID as non-ulid.
+// For more details see https://github.com/ulid/spec
+func IsULID(str string) bool {
+	_, err := ulid.ParseStrict(str)
+	return err == nil
+}
+
+// ParseULID parses a string that represents a valid ULID
+func ParseULID(str string) (ULID, error) {
+	var u ULID
+
+	return u, u.UnmarshalText([]byte(str))
+}
+
+// NewULIDZero returns a zero valued ULID type
+func NewULIDZero() ULID {
+	return ULID{}
+}
+
+// NewULID generates a new unique ULID value, and an error if any
+func NewULID() (ULID, error) {
+	var u ULID
+
+	obj := ulidEntropyPool.Get()
+	entropy, ok := obj.(io.Reader)
+	if !ok {
+		return u, fmt.Errorf("failed to cast %+v to io.Reader: %w", obj, ErrFormat)
+	}
+
+	id, err := ulid.New(ulid.Now(), entropy)
+	if err != nil {
+		return u, err
+	}
+	ulidEntropyPool.Put(entropy)
+
+	u.ULID = id
+	return u, nil
+}
+
+// GetULID returns the underlying instance of ULID
+func (u *ULID) GetULID() any {
+	return u.ULID
+}
+
+// MarshalText turns this instance into text
+func (u ULID) MarshalText() ([]byte, error) {
+	return u.ULID.MarshalText()
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *ULID) UnmarshalText(data []byte) error { // validation is performed later on
+	return u.ULID.UnmarshalText(data)
+}
+
+// Scan reads a value from a database driver
+func (u *ULID) Scan(raw any) error {
+	ul, err := ULIDScanOverrideFunc(raw)
+	if err == nil {
+		*u = ul
+	}
+	return err
+}
+
+// Value converts a value to a database driver value
+func (u ULID) Value() (driver.Value, error) {
+	return ULIDValueOverrideFunc(u)
+}
+
+func (u ULID) String() string {
+	return u.ULID.String()
+}
+
+// MarshalJSON returns the ULID as JSON
+func (u ULID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(u.String())
+}
+
+// UnmarshalJSON sets the ULID from JSON
+func (u *ULID) UnmarshalJSON(data []byte) error {
+	if string(data) == jsonNull {
+		return nil
+	}
+	var ustr string
+	if err := json.Unmarshal(data, &ustr); err != nil {
+		return err
+	}
+	id, err := ulid.ParseStrict(ustr)
+	if err != nil {
+		return fmt.Errorf("couldn't parse JSON value as ULID: %w", err)
+	}
+	u.ULID = id
+	return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *ULID) DeepCopyInto(out *ULID) {
+	*out = *u
+}
+
+// DeepCopy copies the receiver into a new ULID.
+func (u *ULID) DeepCopy() *ULID {
+	if u == nil {
+		return nil
+	}
+	out := new(ULID)
+	u.DeepCopyInto(out)
+	return out
+}
+
+// GobEncode implements the gob.GobEncoder interface.
+func (u ULID) GobEncode() ([]byte, error) {
+	return u.ULID.MarshalBinary()
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+func (u *ULID) GobDecode(data []byte) error {
+	return u.ULID.UnmarshalBinary(data)
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (u ULID) MarshalBinary() ([]byte, error) {
+	return u.ULID.MarshalBinary()
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (u *ULID) UnmarshalBinary(data []byte) error { + return u.ULID.UnmarshalBinary(data) +} + +// Equal checks if two ULID instances are equal by their underlying type +func (u ULID) Equal(other ULID) bool { + return u.ULID == other.ULID +} diff --git a/vendor/github.com/go-openapi/swag/.codecov.yml b/vendor/github.com/go-openapi/swag/.codecov.yml new file mode 100644 index 00000000000..3354f44b28e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.codecov.yml @@ -0,0 +1,4 @@ +ignore: + - jsonutils/fixtures_test + - jsonutils/adapters/ifaces/mocks + - jsonutils/adapters/testintegration/benchmarks diff --git a/vendor/github.com/go-openapi/swag/.editorconfig b/vendor/github.com/go-openapi/swag/.editorconfig new file mode 100644 index 00000000000..3152da69a5d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/swag/.gitattributes b/vendor/github.com/go-openapi/swag/.gitattributes new file mode 100644 index 00000000000..49ad52766ab --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.gitattributes @@ -0,0 +1,2 @@ +# gofmt always uses LF, whereas Git uses CRLF on Windows. +*.go text eol=lf diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore new file mode 100644 index 00000000000..c4b1b64f04e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +vendor +Godeps +.idea +*.out diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml new file mode 100644 index 00000000000..126264a6b89 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -0,0 +1,78 @@ +version: "2" +linters: + default: all + disable: + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - godot + - godox + - gomoddirectives + - gosmopolitan + - inamedparam + - intrange + - ireturn + - lll + - musttag + - modernize + - nestif + - nlreturn + - nonamedreturns + - noinlineerr + - paralleltest + - recvcheck + - testpackage + - thelper + - tagliatelle + - tparallel + - unparam + - varnamelen + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. 
+ # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/swag/.mockery.yml b/vendor/github.com/go-openapi/swag/.mockery.yml new file mode 100644 index 00000000000..8557cb58d33 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.mockery.yml @@ -0,0 +1,30 @@ +all: false +dir: '{{.InterfaceDir}}' +filename: mocks_test.go +force-file-write: true +formatter: goimports +include-auto-generated: false +log-level: info +structname: '{{.Mock}}{{.InterfaceName}}' +pkgname: '{{.SrcPackageName}}' +recursive: false +require-template-schema-exists: true +template: matryer +template-schema: '{{.Template}}.schema.json' +packages: + github.com/go-openapi/swag/jsonutils/adapters/ifaces: + config: + dir: jsonutils/adapters/ifaces/mocks + filename: mocks.go + pkgname: 'mocks' + force-file-write: true + all: true + github.com/go-openapi/swag/jsonutils/adapters/testintegration: + config: + inpackage: true + dir: jsonutils/adapters/testintegration + force-file-write: true + all: true + interfaces: + EJMarshaler: + EJUnmarshaler: diff --git a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/swag/LICENSE b/vendor/github.com/go-openapi/swag/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md new file mode 100644 index 00000000000..371fd55fdc3 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/README.md @@ -0,0 +1,239 @@ +# Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](https://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) + +Package `swag` contains a bunch of helper functions for go-openapi and go-swagger projects. + +You may also use it standalone for your projects. + +> **NOTE** +> `swag` is one of the foundational building blocks of the go-openapi initiative. +> Most repositories in `github.com/go-openapi/...` depend on it in some way. +> And so does our CLI tool `github.com/go-swagger/go-swagger`, +> as well as the code generated by this tool. + +* [Contents](#contents) +* [Dependencies](#dependencies) +* [Release Notes](#release-notes) +* [Licensing](#licensing) +* [Note to contributors](#note-to-contributors) +* [TODOs, suggestions and plans](#todos-suggestions-and-plans) + +## Contents + +`go-openapi/swag` exposes a collection of relatively independent modules. + +Moving forward, no additional feature will be added to the `swag` API directly at the root package level, +which remains there for backward-compatibility purposes. All exported top-level features are now deprecated. + +Child modules will continue to evolve and some new ones may be added in the future. + +| Module | Content | Main features | +|---------------|---------|---------------| +| `cmdutils` | utilities to work with CLIs || +| `conv` | type conversion utilities | convert between values and pointers for any types
convert from string to builtin types (wraps `strconv`)
require `./typeutils` (test dependency)
| +| `fileutils` | file utilities | | +| `jsonname` | JSON utilities | infer JSON names from `go` properties
| +| `jsonutils` | JSON utilities | fast json concatenation
read and write JSON from and to dynamic `go` data structures
~require `github.com/mailru/easyjson`~
| +| `loading` | file loading | load from file or http
require `./yamlutils`
| +| `mangling` | safe name generation | name mangling for `go`
| +| `netutils` | networking utilities | host, port from address
| +| `stringutils` | `string` utilities | search in slice (with case-insensitive)
split/join query parameters as arrays
| +| `typeutils` | `go` types utilities | check the zero value for any type
safe check for a nil value
| +| `yamlutils` | YAML utilities | converting YAML to JSON
loading YAML into a dynamic YAML document
maintaining the original order of keys in YAML objects
require `./jsonutils`
~require `github.com/mailru/easyjson`~
require `go.yaml.in/yaml/v3`
|
+
+---
+
+## Dependencies
+
+The root module `github.com/go-openapi/swag` at the repo level maintains a few
+dependencies outside of the standard library.
+
+* YAML utilities depend on `go.yaml.in/yaml/v3`
+* JSON utilities depend on their registered adapter module:
+  * by default, only the standard library is used
+  * `github.com/mailru/easyjson` is now only a dependency for module
+    `github.com/go-openapi/swag/jsonutils/adapters/easyjson/json`,
+    for users willing to import that module.
+  * integration tests and benchmarks use all the dependencies and are published as their own module
+* other dependencies are test dependencies drawn from `github.com/stretchr/testify`
+
+## Release notes
+
+### v0.25.4
+
+**mangling**
+
+Bug fix
+
+* [x] mangler may panic with pluralized overlapping initialisms
+
+Tests
+
+* [x] introduced fuzz tests
+
+### v0.25.3
+
+**mangling**
+
+Bug fix
+
+* [x] mangler may panic with pluralized initialisms
+
+### v0.25.2
+
+Minor changes due to internal maintenance that don't affect the behavior of the library.
+
+* [x] removed indirect test dependencies by switching all tests to `go-openapi/testify`,
+  a fork of `stretchr/testify` with zero dependencies.
+* [x] improvements to CI to catch test reports.
+* [x] modernized licensing annotations in source code, using the more compact SPDX annotations
+  rather than the full license terms.
+* [x] simplified JSON & YAML testing a bit by using newly available assertions
+* started the journey to an OpenSSF scorecard badge:
+  * [x] made permissions explicit in CI workflows
+  * [x] published security policy
+  * pinned GitHub Actions dependencies
+  * introduced fuzzing in tests
+
+### v0.25.1
+
+* fixes a data race that could occur when using the standard library implementation of a JSON ordered map
+
+### v0.25.0
+
+**New with this release**:
+
+* requires `go1.24`, as iterators are being introduced
+* removes the dependency on `mailru/easyjson` by default (#68)
+  * functionality remains the same, but performance may somewhat degrade for applications
+    that relied on `easyjson`
+  * users of the JSON or YAML utilities who want to use `easyjson` as their preferred JSON serializer library
+    will be able to do so by registering the corresponding JSON adapter at runtime. See below.
+  * ordered keys in JSON and YAML objects: this feature used to rely solely on `easyjson`.
+    With this release, an implementation relying on the standard `encoding/json` is provided.
+  * an independent [benchmark](./jsonutils/adapters/testintegration/benchmarks/README.md) to compare the different adapters
+* improves the "float is integer" check (`conv.IsFloat64AJSONInteger`) (#59)
+* removes the _direct_ dependency on `gopkg.in/yaml.v3` (an indirect dependency is still incurred through `stretchr/testify`) (#127)
+* exposed `conv.IsNil()` (previously kept private): a safe nil check (accounting for the "non-nil interface with nil value" nonsensical go trick)
+
+**What's coming next?**
+
+Moving forward, we want to:
+* provide an implementation of the JSON adapter based on `encoding/json/v2`, for `go1.25` builds.
+* provide similar implementations for `goccy/go-json` and `json-iterator/go`; other
+  similar libraries may be interesting too.
+
+**How to explicitly register a dependency at runtime?**
+
+The following would maintain how the JSON utilities proposed by `swag` used to work, up to `v0.24.1`.
+
+ ```go
+ import (
+     "github.com/go-openapi/swag/jsonutils/adapters"
+     easyjson "github.com/go-openapi/swag/jsonutils/adapters/easyjson/json"
+ )
+
+ func init() {
+     easyjson.Register(adapters.Registry)
+ }
+ ```
+
+Subsequent calls to `jsonutils.ReadJSON()` or `jsonutils.WriteJSON()` will switch to `easyjson`
+whenever the passed data structures implement `easyjson.Unmarshaler` or `easyjson.Marshaler` respectively,
+or fall back to the standard library.
+
+For more details, you may also look at our
+[integration tests](jsonutils/adapters/testintegration/integration_suite_test.go#29).
+
+### v0.24.0
+
+With this release, we have largely modernized the API of `swag`:
+
+* The traditional `swag` API is still supported: code that imports `swag` will still
+  compile and work the same.
+* A deprecation notice is published to encourage consumers of this library to adopt
+  the newer API.
+* **Deprecation notice**
+  * configuration through global variables is now deprecated, in favor of options passed as parameters
+  * all helper functions are moved to more specialized packages, which are exposed as
+    go modules. Importing such a module would reduce the footprint of dependencies.
+  * _all_ functions, variables, constants exposed by the deprecated API have now moved, so
+    that consumers of the new API no longer need to import github.com/go-openapi/swag, but
+    should import the desired sub-module(s).
+
+**New with this release**:
+
+* [x] type converters and pointer to value helpers now support generic types
+* [x] name mangling now supports pluralized initialisms (issue #46)
+  Strings like "contact IDs" are now recognized as such a plural form and mangled as a linter would expect.
+* [x] performance: small improvements to reduce the overhead of convert/format wrappers (see issue #110, or PR #108)
+* [x] performance: name mangling utilities run ~10% faster (PR #115)
+
+---
+
+## Licensing
+
+This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE).
+
+## Note to contributors
+
+A mono-repo structure comes with some unavoidable extra pains...
+
+* Testing
+
+> The usual `go test ./...` command, run from the root of this repo, won't work any longer to test all submodules.
+>
+> Each module constitutes an independent unit of test. So you have to run `go test` inside each module.
+> Or you may take a look at how this is achieved by CI
+> [here](https://github.com/go-openapi/swag/blob/master/.github/workflows/go-test.yml).
+>
+> There are also some alternative tricks using `go work`, for local development, if you feel comfortable with
+> go workspaces. Perhaps some day, we'll have a `go work test` to run all tests without any hack.
+
+* Releasing
+
+> Each module follows its own independent module versioning.
+>
+> So you have tags like `mangling/v0.24.0`, `fileutils/v0.24.0`, etc. that are used by `go mod` and `go get`
+> to refer to the tagged version of each module specifically.
+>
+> This means we may release patches etc. to each module independently.
+>
+> We'd like to adopt the rule that modules in this repo would only differ by a patch version
+> (e.g. `v0.24.5` vs `v0.24.3`), and we'll level all modules whenever a minor version is introduced.
+>
+> A script in `./hack` is provided to tag all modules with the same version in one go.
+
+* Continuous integration
+
+> At this moment, all tests in all modules are systematically run over the full test matrix (3 platforms × 2 go releases).
+> This generates quite a lot of jobs.
+>
+> We ought to reduce the number of jobs required to test a PR focused on only a few modules.
+
+## Todos, suggestions and plans
+
+All kinds of contributions are welcome.
+
+A few ideas:
+
+* [x] Complete the split of dependencies to isolate easyjson from the rest
+* [x] Improve CI to reduce needed tests
+* [x] Replace the dependency on `gopkg.in/yaml.v3` (`yamlutil`)
+* [ ] Improve mangling utilities (improve readability, support for capitalized words,
+  better word substitution for non-letter symbols...)
+* [ ] Move back to this common shared pot a few of the technical features introduced by go-swagger independently
+  (e.g. mangle go package names, search package with go modules support, ...)
+* [ ] Apply a similar mono-repo approach to go-openapi/strfmt, which suffers from similar woes: bloated API,
+  imposed dependency on some database driver.
+* [ ] Adapt `go-swagger` (incl. generated code) to the new `swag` API.
+* [ ] Factorize some tests, as there is a lot of redundant testing code in `jsonutils`
+* [ ] Benchmark & profiling: publish independently the tool built to analyze and chart benchmarks (e.g. similar to `benchvisual`)
+* [ ] More thorough testing for nil / null cases
+* [ ] CI pipeline to manage releases
+* [ ] Cleaner mockery generation (doesn't work out of the box for all sub-modules)
diff --git a/vendor/github.com/go-openapi/swag/SECURITY.md b/vendor/github.com/go-openapi/swag/SECURITY.md
new file mode 100644
index 00000000000..72296a83135
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+This policy outlines the commitment and practices of the go-openapi maintainers regarding security.
+
+## Supported Versions
+
+| Version | Supported          |
+| ------- | ------------------ |
+| 0.25.x  | :white_check_mark: |
+
+## Reporting a vulnerability
+
+If you become aware of a security vulnerability that affects the current repository,
+please report it privately to the maintainers.
+
+Please follow the instructions provided by GitHub to
+[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability).
+
+TL;DR: on GitHub, navigate to the project's "Security" tab, then click on "Report a vulnerability".
diff --git a/vendor/github.com/go-openapi/swag/cmdutils/LICENSE b/vendor/github.com/go-openapi/swag/cmdutils/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/cmdutils/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/swag/cmdutils/cmd_utils.go b/vendor/github.com/go-openapi/swag/cmdutils/cmd_utils.go new file mode 100644 index 00000000000..6c7bbb26f03 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/cmdutils/cmd_utils.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package cmdutils + +// CommandLineOptionsGroup represents a group of user-defined command line options. +// +// This is for instance used to configure command line arguments in API servers generated by go-swagger. +type CommandLineOptionsGroup struct { + ShortDescription string + LongDescription string + Options any +} diff --git a/vendor/github.com/go-openapi/swag/cmdutils/doc.go b/vendor/github.com/go-openapi/swag/cmdutils/doc.go new file mode 100644 index 00000000000..31f2c37538a --- /dev/null +++ b/vendor/github.com/go-openapi/swag/cmdutils/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package cmdutils brings helpers for CLIs produced by go-openapi +package cmdutils diff --git a/vendor/github.com/go-openapi/swag/cmdutils_iface.go b/vendor/github.com/go-openapi/swag/cmdutils_iface.go new file mode 100644 index 00000000000..bd0c1fc1280 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/cmdutils_iface.go @@ -0,0 +1,11 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import "github.com/go-openapi/swag/cmdutils" + +// CommandLineOptionsGroup represents a group of user-defined command line options. +// +// Deprecated: use [cmdutils.CommandLineOptionsGroup] instead. +type CommandLineOptionsGroup = cmdutils.CommandLineOptionsGroup diff --git a/vendor/github.com/go-openapi/swag/conv/LICENSE b/vendor/github.com/go-openapi/swag/conv/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/conv/convert.go b/vendor/github.com/go-openapi/swag/conv/convert.go new file mode 100644 index 00000000000..f205c391345 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/convert.go @@ -0,0 +1,161 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package conv + +import ( + "math" + "strconv" + "strings" +) + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 + epsilon float64 = 1e-9 +) + +// IsFloat64AJSONInteger allows for integers [-2^53, 2^53-1] inclusive. 
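+//
+// Illustrative values (an editorial sketch, not from the upstream docs):
+//
+//	IsFloat64AJSONInteger(2.0)    // true: an exact integer in the safe range
+//	IsFloat64AJSONInteger(2.5)    // false: has a fractional part
+//	IsFloat64AJSONInteger(1e300)  // false: outside the JSON-safe integer range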
+func IsFloat64AJSONInteger(f float64) bool {
+	if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat {
+		return false
+	}
+	rounded := math.Round(f)
+	if f == rounded {
+		return true
+	}
+	if rounded == 0 { // f == 0.0 already returned true above
+		return false
+	}
+
+	diff := math.Abs(f - rounded)
+	if diff == 0 {
+		return true
+	}
+
+	// relative error: |f - Round(f)| / |Round(f)| < ε
+	return diff < epsilon*math.Abs(rounded)
+}
+
+// ConvertFloat turns a string into a float numerical value.
+func ConvertFloat[T Float](str string) (T, error) {
+	var v T
+	f, err := strconv.ParseFloat(str, bitsize(v))
+	if err != nil {
+		return 0, err
+	}
+
+	return T(f), nil
+}
+
+// ConvertInteger turns a string into a signed integer.
+func ConvertInteger[T Signed](str string) (T, error) {
+	var v T
+	f, err := strconv.ParseInt(str, 10, bitsize(v))
+	if err != nil {
+		return 0, err
+	}
+
+	return T(f), nil
+}
+
+// ConvertUinteger turns a string into an unsigned integer.
+func ConvertUinteger[T Unsigned](str string) (T, error) {
+	var v T
+	f, err := strconv.ParseUint(str, 10, bitsize(v))
+	if err != nil {
+		return 0, err
+	}
+
+	return T(f), nil
+}
+
+// ConvertBool turns a string into a boolean.
+//
+// It supports a few more "true" strings than [strconv.ParseBool]:
+//
+//   - it is not case-sensitive ("trUe" or "FalsE" work)
+//   - "ok", "yes", "y", "on", "selected", "checked", "enabled" are all true
+//   - everything that is not true is false: there is never an actual error returned
+func ConvertBool(str string) (bool, error) {
+	switch strings.ToLower(str) {
+	case "true",
+		"1",
+		"yes",
+		"ok",
+		"y",
+		"on",
+		"selected",
+		"checked",
+		"t",
+		"enabled":
+		return true, nil
+	default:
+		return false, nil
+	}
+}
+
+// ConvertFloat32 turns a string into a float32.
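+//
+// Usage sketch (editorial addition, assuming a decimal input string):
+//
+//	f, err := ConvertFloat32("1.5") // f == 1.5, err == nil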
+func ConvertFloat32(str string) (float32, error) { return ConvertFloat[float32](str) }
+
+// ConvertFloat64 turns a string into a float64.
+func ConvertFloat64(str string) (float64, error) { return ConvertFloat[float64](str) }
+
+// ConvertInt8 turns a string into an int8.
+func ConvertInt8(str string) (int8, error) { return ConvertInteger[int8](str) }
+
+// ConvertInt16 turns a string into an int16.
+func ConvertInt16(str string) (int16, error) {
+	i, err := strconv.ParseInt(str, 10, 16)
+	if err != nil {
+		return 0, err
+	}
+	return int16(i), nil
+}
+
+// ConvertInt32 turns a string into an int32.
+func ConvertInt32(str string) (int32, error) {
+	i, err := strconv.ParseInt(str, 10, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(i), nil
+}
+
+// ConvertInt64 turns a string into an int64.
+func ConvertInt64(str string) (int64, error) {
+	return strconv.ParseInt(str, 10, 64)
+}
+
+// ConvertUint8 turns a string into a uint8.
+func ConvertUint8(str string) (uint8, error) {
+	i, err := strconv.ParseUint(str, 10, 8)
+	if err != nil {
+		return 0, err
+	}
+	return uint8(i), nil
+}
+
+// ConvertUint16 turns a string into a uint16.
+func ConvertUint16(str string) (uint16, error) {
+	i, err := strconv.ParseUint(str, 10, 16)
+	if err != nil {
+		return 0, err
+	}
+	return uint16(i), nil
+}
+
+// ConvertUint32 turns a string into a uint32.
+func ConvertUint32(str string) (uint32, error) {
+	i, err := strconv.ParseUint(str, 10, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(i), nil
+}
+
+// ConvertUint64 turns a string into a uint64.
+func ConvertUint64(str string) (uint64, error) {
+	return strconv.ParseUint(str, 10, 64)
+}
diff --git a/vendor/github.com/go-openapi/swag/conv/convert_types.go b/vendor/github.com/go-openapi/swag/conv/convert_types.go
new file mode 100644
index 00000000000..cf4c6495ebc
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/conv/convert_types.go
@@ -0,0 +1,72 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package conv
+
+// Unlicensed credits (idea, concept)
+//
+// The idea to convert values to pointers and the other way around was inspired, eons ago, by the aws go sdk.
+//
+// Nowadays, all sensible API SDKs expose similar functionality.
+
+// Pointer returns a pointer to the value passed in.
+func Pointer[T any](v T) *T {
+	return &v
+}
+
+// Value returns a shallow copy of the value of the pointer passed in.
+//
+// If the pointer is nil, the returned value is the zero value.
+func Value[T any](v *T) T {
+	if v != nil {
+		return *v
+	}
+
+	var zero T
+	return zero
+}
+
+// PointerSlice converts a slice of values into a slice of pointers.
+func PointerSlice[T any](src []T) []*T {
+	dst := make([]*T, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// ValueSlice converts a slice of pointers into a slice of values.
+//
+// nil elements are zero values.
+func ValueSlice[T any](src []*T) []T {
+	dst := make([]T, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// PointerMap converts a map of values into a map of pointers.
+func PointerMap[K comparable, T any](src map[K]T) map[K]*T {
+	dst := make(map[K]*T)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// ValueMap converts a map of pointers into a map of values.
+//
+// nil elements are skipped.
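+//
+// Editorial sketch (illustrative only):
+//
+//	src := map[string]*int{"a": Pointer(1), "b": nil}
+//	dst := ValueMap(src) // map[string]int{"a": 1}; "b" is dropped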
+func ValueMap[K comparable, T any](src map[K]*T) map[K]T {
+	dst := make(map[K]T)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
diff --git a/vendor/github.com/go-openapi/swag/conv/doc.go b/vendor/github.com/go-openapi/swag/conv/doc.go
new file mode 100644
index 00000000000..1bd6ead6e2d
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/conv/doc.go
@@ -0,0 +1,15 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+// Package conv exposes utilities to convert types.
+//
+// The Convert and Format families of functions are essentially a shorthand to [strconv] functions,
+// using the decimal representation of numbers.
+//
+// Features:
+//
+//   - from string representation to value ("Convert*") and reciprocally ("Format*")
+//   - from pointer to value ([Value]) and reciprocally ([Pointer])
+//   - from slice of values to slice of pointers ([PointerSlice]) and reciprocally ([ValueSlice])
+//   - from map of values to map of pointers ([PointerMap]) and reciprocally ([ValueMap])
+package conv
diff --git a/vendor/github.com/go-openapi/swag/conv/format.go b/vendor/github.com/go-openapi/swag/conv/format.go
new file mode 100644
index 00000000000..5b87b8e146b
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/conv/format.go
@@ -0,0 +1,28 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package conv
+
+import (
+	"strconv"
+)
+
+// FormatInteger turns an integer type into a string.
+func FormatInteger[T Signed](value T) string {
+	return strconv.FormatInt(int64(value), 10)
+}
+
+// FormatUinteger turns an unsigned integer type into a string.
+func FormatUinteger[T Unsigned](value T) string {
+	return strconv.FormatUint(uint64(value), 10)
+}
+
+// FormatFloat turns a floating point numerical value into a string.
+func FormatFloat[T Float](value T) string {
+	return strconv.FormatFloat(float64(value), 'f', -1, bitsize(value))
+}
+
+// FormatBool turns a boolean into a string.
+func FormatBool(value bool) string {
+	return strconv.FormatBool(value)
+}
diff --git a/vendor/github.com/go-openapi/swag/conv/sizeof.go b/vendor/github.com/go-openapi/swag/conv/sizeof.go
new file mode 100644
index 00000000000..49434655738
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/conv/sizeof.go
@@ -0,0 +1,20 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package conv
+
+import "unsafe"
+
+// bitsize returns the size in bits of a type.
+//
+// NOTE: [unsafe.Sizeof] simply returns the size in bytes of the value.
+// For primitive types T, the generic stencil is precompiled and this value
+// is resolved at compile time, resulting in an immediate call to [strconv.ParseFloat].
+//
+// We may leave it up to the go compiler to simplify this function into a
+// constant value, which happens in practice at least for primitive types
+// (e.g. numerical types).
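+//
+// For example (editorial note): bitsize(float32(0)) is 32 and bitsize(uint64(0)) is 64.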
+func bitsize[T Numerical](value T) int { + const bitsPerByte = 8 + return int(unsafe.Sizeof(value)) * bitsPerByte +} diff --git a/vendor/github.com/go-openapi/swag/conv/type_constraints.go b/vendor/github.com/go-openapi/swag/conv/type_constraints.go new file mode 100644 index 00000000000..81135e827e5 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv/type_constraints.go @@ -0,0 +1,29 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package conv + +type ( + // these type constraints are redefined after golang.org/x/exp/constraints, + // because importing that package causes an undesired go upgrade. + + // Signed integer types, cf. [golang.org/x/exp/constraints.Signed] + Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 + } + + // Unsigned integer types, cf. [golang.org/x/exp/constraints.Unsigned] + Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr + } + + // Float numerical types, cf. [golang.org/x/exp/constraints.Float] + Float interface { + ~float32 | ~float64 + } + + // Numerical types + Numerical interface { + Signed | Unsigned | Float + } +) diff --git a/vendor/github.com/go-openapi/swag/conv_iface.go b/vendor/github.com/go-openapi/swag/conv_iface.go new file mode 100644 index 00000000000..eea7b2e56e3 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/conv_iface.go @@ -0,0 +1,486 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import ( + "time" + + "github.com/go-openapi/swag/conv" +) + +// IsFloat64AJSONInteger allows for integers [-2^53, 2^53-1] inclusive. +// +// Deprecated: use [conv.IsFloat64AJSONInteger] instead. +func IsFloat64AJSONInteger(f float64) bool { return conv.IsFloat64AJSONInteger(f) } + +// ConvertBool turns a string into a boolean. +// +// Deprecated: use [conv.ConvertBool] instead. +func ConvertBool(str string) (bool, error) { return conv.ConvertBool(str) } + +// ConvertFloat32 turns a string into a float32. +// +// Deprecated: use [conv.ConvertFloat32] instead. Alternatively, you may use the generic version [conv.ConvertFloat]. +func ConvertFloat32(str string) (float32, error) { return conv.ConvertFloat[float32](str) } + +// ConvertFloat64 turns a string into a float64. +// +// Deprecated: use [conv.ConvertFloat64] instead. Alternatively, you may use the generic version [conv.ConvertFloat]. +func ConvertFloat64(str string) (float64, error) { return conv.ConvertFloat[float64](str) } + +// ConvertInt8 turns a string into an int8. +// +// Deprecated: use [conv.ConvertInt8] instead. Alternatively, you may use the generic version [conv.ConvertInteger]. +func ConvertInt8(str string) (int8, error) { return conv.ConvertInteger[int8](str) } + +// ConvertInt16 turns a string into an int16. +// +// Deprecated: use [conv.ConvertInt16] instead. Alternatively, you may use the generic version [conv.ConvertInteger]. +func ConvertInt16(str string) (int16, error) { return conv.ConvertInteger[int16](str) } + +// ConvertInt32 turns a string into an int32. +// +// Deprecated: use [conv.ConvertInt32] instead. Alternatively, you may use the generic version [conv.ConvertInteger]. +func ConvertInt32(str string) (int32, error) { return conv.ConvertInteger[int32](str) } + +// ConvertInt64 turns a string into an int64. +// +// Deprecated: use [conv.ConvertInt64] instead. Alternatively, you may use the generic version [conv.ConvertInteger]. 
+func ConvertInt64(str string) (int64, error) { return conv.ConvertInteger[int64](str) }
+
+// ConvertUint8 turns a string into a uint8.
+//
+// Deprecated: use [conv.ConvertUint8] instead. Alternatively, you may use the generic version [conv.ConvertUinteger].
+func ConvertUint8(str string) (uint8, error) { return conv.ConvertUinteger[uint8](str) }
+
+// ConvertUint16 turns a string into a uint16.
+//
+// Deprecated: use [conv.ConvertUint16] instead. Alternatively, you may use the generic version [conv.ConvertUinteger].
+func ConvertUint16(str string) (uint16, error) { return conv.ConvertUinteger[uint16](str) }
+
+// ConvertUint32 turns a string into a uint32.
+//
+// Deprecated: use [conv.ConvertUint32] instead. Alternatively, you may use the generic version [conv.ConvertUinteger].
+func ConvertUint32(str string) (uint32, error) { return conv.ConvertUinteger[uint32](str) }
+
+// ConvertUint64 turns a string into a uint64.
+//
+// Deprecated: use [conv.ConvertUint64] instead. Alternatively, you may use the generic version [conv.ConvertUinteger].
+func ConvertUint64(str string) (uint64, error) { return conv.ConvertUinteger[uint64](str) }
+
+// FormatBool turns a boolean into a string.
+//
+// Deprecated: use [conv.FormatBool] instead.
+func FormatBool(value bool) string { return conv.FormatBool(value) }
+
+// FormatFloat32 turns a float32 into a string.
+//
+// Deprecated: use [conv.FormatFloat] instead.
+func FormatFloat32(value float32) string { return conv.FormatFloat(value) }
+
+// FormatFloat64 turns a float64 into a string.
+//
+// Deprecated: use [conv.FormatFloat] instead.
+func FormatFloat64(value float64) string { return conv.FormatFloat(value) }
+
+// FormatInt8 turns an int8 into a string.
+//
+// Deprecated: use [conv.FormatInteger] instead.
+func FormatInt8(value int8) string { return conv.FormatInteger(value) }
+
+// FormatInt16 turns an int16 into a string.
+//
+// Deprecated: use [conv.FormatInteger] instead.
+func FormatInt16(value int16) string { return conv.FormatInteger(value) }
+
+// FormatInt32 turns an int32 into a string.
+//
+// Deprecated: use [conv.FormatInteger] instead.
+func FormatInt32(value int32) string { return conv.FormatInteger(value) }
+
+// FormatInt64 turns an int64 into a string.
+//
+// Deprecated: use [conv.FormatInteger] instead.
+func FormatInt64(value int64) string { return conv.FormatInteger(value) }
+
+// FormatUint8 turns a uint8 into a string.
+//
+// Deprecated: use [conv.FormatUinteger] instead.
+func FormatUint8(value uint8) string { return conv.FormatUinteger(value) }
+
+// FormatUint16 turns a uint16 into a string.
+//
+// Deprecated: use [conv.FormatUinteger] instead.
+func FormatUint16(value uint16) string { return conv.FormatUinteger(value) }
+
+// FormatUint32 turns a uint32 into a string.
+//
+// Deprecated: use [conv.FormatUinteger] instead.
+func FormatUint32(value uint32) string { return conv.FormatUinteger(value) }
+
+// FormatUint64 turns a uint64 into a string.
+//
+// Deprecated: use [conv.FormatUinteger] instead.
+func FormatUint64(value uint64) string { return conv.FormatUinteger(value) }
+
+// String returns a pointer to the string value passed in.
+//
+// Deprecated: use [conv.Pointer] instead.
+func String(v string) *string { return conv.Pointer(v) }
+
+// StringValue returns the value of the string pointer passed in, or
+// "" if the pointer is nil.
+//
+// Deprecated: use [conv.Value] instead.
+func StringValue(v *string) string { return conv.Value(v) }
+
+// StringSlice converts a slice of string values into a slice of string pointers.
+//
+// Deprecated: use [conv.PointerSlice] instead.
+func StringSlice(src []string) []*string { return conv.PointerSlice(src) }
+
+// StringValueSlice converts a slice of string pointers into a slice of string values.
+//
+// Deprecated: use [conv.ValueSlice] instead.
+func StringValueSlice(src []*string) []string { return conv.ValueSlice(src) }
+
+// StringMap converts a string map of string values into a string map of string pointers.
+//
+// Deprecated: use [conv.PointerMap] instead.
+func StringMap(src map[string]string) map[string]*string { return conv.PointerMap(src) }
+
+// StringValueMap converts a string map of string pointers into a string map of string values.
+//
+// Deprecated: use [conv.ValueMap] instead.
+func StringValueMap(src map[string]*string) map[string]string { return conv.ValueMap(src) }
+
+// Bool returns a pointer to the bool value passed in.
+//
+// Deprecated: use [conv.Pointer] instead.
+func Bool(v bool) *bool { return conv.Pointer(v) }
+
+// BoolValue returns the value of the bool pointer passed in, or false if the pointer is nil.
+//
+// Deprecated: use [conv.Value] instead.
+func BoolValue(v *bool) bool { return conv.Value(v) }
+
+// BoolSlice converts a slice of bool values into a slice of bool pointers.
+//
+// Deprecated: use [conv.PointerSlice] instead.
+func BoolSlice(src []bool) []*bool { return conv.PointerSlice(src) }
+
+// BoolValueSlice converts a slice of bool pointers into a slice of bool values.
+//
+// Deprecated: use [conv.ValueSlice] instead.
+func BoolValueSlice(src []*bool) []bool { return conv.ValueSlice(src) }
+
+// BoolMap converts a string map of bool values into a string map of bool pointers.
+//
+// Deprecated: use [conv.PointerMap] instead.
+func BoolMap(src map[string]bool) map[string]*bool { return conv.PointerMap(src) }
+
+// BoolValueMap converts a string map of bool pointers into a string map of bool values.
+//
+// Deprecated: use [conv.ValueMap] instead.
+func BoolValueMap(src map[string]*bool) map[string]bool { return conv.ValueMap(src) }
+
+// Int returns a pointer to the int value passed in.
+//
+// Deprecated: use [conv.Pointer] instead.
+func Int(v int) *int { return conv.Pointer(v) }
+
+// IntValue returns the value of the int pointer passed in, or 0 if the pointer is nil.
+//
+// Deprecated: use [conv.Value] instead.
+func IntValue(v *int) int { return conv.Value(v) }
+
+// IntSlice converts a slice of int values into a slice of int pointers.
+//
+// Deprecated: use [conv.PointerSlice] instead.
+func IntSlice(src []int) []*int { return conv.PointerSlice(src) }
+
+// IntValueSlice converts a slice of int pointers into a slice of int values.
+//
+// Deprecated: use [conv.ValueSlice] instead.
+func IntValueSlice(src []*int) []int { return conv.ValueSlice(src) }
+
+// IntMap converts a string map of int values into a string map of int pointers.
+//
+// Deprecated: use [conv.PointerMap] instead.
+func IntMap(src map[string]int) map[string]*int { return conv.PointerMap(src) }
+
+// IntValueMap converts a string map of int pointers into a string map of int values.
+//
+// Deprecated: use [conv.ValueMap] instead.
+func IntValueMap(src map[string]*int) map[string]int { return conv.ValueMap(src) }
+
+// Int32 returns a pointer to the int32 value passed in.
+//
+// Deprecated: use [conv.Pointer] instead.
+func Int32(v int32) *int32 { return conv.Pointer(v) }
+
+// Int32Value returns the value of the int32 pointer passed in, or 0 if the pointer is nil.
+//
+// Deprecated: use [conv.Value] instead.
+func Int32Value(v *int32) int32 { return conv.Value(v) }
+
+// Int32Slice converts a slice of int32 values into a slice of int32 pointers.
+//
+// Deprecated: use [conv.PointerSlice] instead.
+func Int32Slice(src []int32) []*int32 { return conv.PointerSlice(src) }
+
+// Int32ValueSlice converts a slice of int32 pointers into a slice of int32 values.
+//
+// Deprecated: use [conv.ValueSlice] instead.
+func Int32ValueSlice(src []*int32) []int32 { return conv.ValueSlice(src) }
+
+// Int32Map converts a string map of int32 values into a string map of int32 pointers.
+//
+// Deprecated: use [conv.PointerMap] instead.
+func Int32Map(src map[string]int32) map[string]*int32 { return conv.PointerMap(src) }
+
+// Int32ValueMap converts a string map of int32 pointers into a string map of int32 values.
+//
+// Deprecated: use [conv.ValueMap] instead.
+func Int32ValueMap(src map[string]*int32) map[string]int32 { return conv.ValueMap(src) }
+
+// Int64 returns a pointer to the int64 value passed in.
+//
+// Deprecated: use [conv.Pointer] instead.
+func Int64(v int64) *int64 { return conv.Pointer(v) }
+
+// Int64Value returns the value of the int64 pointer passed in, or 0 if the pointer is nil.
+//
+// Deprecated: use [conv.Value] instead.
+func Int64Value(v *int64) int64 { return conv.Value(v) }
+
+// Int64Slice converts a slice of int64 values into a slice of int64 pointers.
+//
+// Deprecated: use [conv.PointerSlice] instead.
+func Int64Slice(src []int64) []*int64 { return conv.PointerSlice(src) }
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of int64 values.
+//
+// Deprecated: use [conv.ValueSlice] instead.
+func Int64ValueSlice(src []*int64) []int64 { return conv.ValueSlice(src) }
+
+// Int64Map converts a string map of int64 values into a string map of int64 pointers.
+//
+// Deprecated: use [conv.PointerMap] instead.
+func Int64Map(src map[string]int64) map[string]*int64 { return conv.PointerMap(src) }
+
+// Int64ValueMap converts a string map of int64 pointers into a string map of int64 values.
+//
+// Deprecated: use [conv.ValueMap] instead.
+func Int64ValueMap(src map[string]*int64) map[string]int64 { return conv.ValueMap(src) }
+
+// Uint16 returns a pointer to the uint16 value passed in.
+//
+// Deprecated: use [conv.Pointer] instead.
+func Uint16(v uint16) *uint16 { return conv.Pointer(v) }
+
+// Uint16Value returns the value of the uint16 pointer passed in, or 0 if the pointer is nil.
+//
+// Deprecated: use [conv.Value] instead.
+func Uint16Value(v *uint16) uint16 { return conv.Value(v) }
+
+// Uint16Slice converts a slice of uint16 values into a slice of uint16 pointers.
+//
+// Deprecated: use [conv.PointerSlice] instead.
+func Uint16Slice(src []uint16) []*uint16 { return conv.PointerSlice(src) }
+
+// Uint16ValueSlice converts a slice of uint16 pointers into a slice of uint16 values.
+//
+// Deprecated: use [conv.ValueSlice] instead.
+func Uint16ValueSlice(src []*uint16) []uint16 { return conv.ValueSlice(src) }
+
+// Uint16Map converts a string map of uint16 values into a string map of uint16 pointers.
+//
+// Deprecated: use [conv.PointerMap] instead.
+func Uint16Map(src map[string]uint16) map[string]*uint16 { return conv.PointerMap(src) }
+
+// Uint16ValueMap converts a string map of uint16 pointers into a string map of uint16 values.
+//
+// Deprecated: use [conv.ValueMap] instead.
+func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { return conv.ValueMap(src) }
+
+// Uint returns a pointer to the uint value passed in.
+//
+// Deprecated: use [conv.Pointer] instead.
+func Uint(v uint) *uint { return conv.Pointer(v) }
+
+// UintValue returns the value of the uint pointer passed in, or 0 if the pointer is nil.
+//
+// Deprecated: use [conv.Value] instead.
+func UintValue(v *uint) uint { return conv.Value(v) }
+
+// UintSlice converts a slice of uint values into a slice of uint pointers.
+//
+// Deprecated: use [conv.PointerSlice] instead.
+func UintSlice(src []uint) []*uint { return conv.PointerSlice(src) }
+
+// UintValueSlice converts a slice of uint pointers into a slice of uint values.
+//
+// Deprecated: use [conv.ValueSlice] instead.
+func UintValueSlice(src []*uint) []uint { return conv.ValueSlice(src) }
+
+// UintMap converts a string map of uint values into a string map of uint pointers.
+//
+// Deprecated: use [conv.PointerMap] instead.
+func UintMap(src map[string]uint) map[string]*uint { return conv.PointerMap(src) }
+
+// UintValueMap converts a string map of uint pointers into a string map of uint values.
+//
+// Deprecated: use [conv.ValueMap] instead.
+func UintValueMap(src map[string]*uint) map[string]uint { return conv.ValueMap(src) }
+
+// Uint32 returns a pointer to the uint32 value passed in.
+//
+// Deprecated: use [conv.Pointer] instead.
+func Uint32(v uint32) *uint32 { return conv.Pointer(v) }
+
+// Uint32Value returns the value of the uint32 pointer passed in, or 0 if the pointer is nil.
+//
+// Deprecated: use [conv.Value] instead.
+func Uint32Value(v *uint32) uint32 { return conv.Value(v) }
+
+// Uint32Slice converts a slice of uint32 values into a slice of uint32 pointers.
+//
+// Deprecated: use [conv.PointerSlice] instead.
+func Uint32Slice(src []uint32) []*uint32 { return conv.PointerSlice(src) }
+
+// Uint32ValueSlice converts a slice of uint32 pointers into a slice of uint32 values.
+//
+// Deprecated: use [conv.ValueSlice] instead.
+func Uint32ValueSlice(src []*uint32) []uint32 { return conv.ValueSlice(src) }
+
+// Uint32Map converts a string map of uint32 values into a string map of uint32 pointers.
+//
+// Deprecated: use [conv.PointerMap] instead.
+func Uint32Map(src map[string]uint32) map[string]*uint32 { return conv.PointerMap(src) }
+
+// Uint32ValueMap converts a string map of uint32 pointers into a string map of uint32 values.
+//
+// Deprecated: use [conv.ValueMap] instead.
+func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { return conv.ValueMap(src) }
+
+// Uint64 returns a pointer to the uint64 value passed in.
+//
+// Deprecated: use [conv.Pointer] instead.
+func Uint64(v uint64) *uint64 { return conv.Pointer(v) }
+
+// Uint64Value returns the value of the uint64 pointer passed in, or 0 if the pointer is nil.
+//
+// Deprecated: use [conv.Value] instead.
+func Uint64Value(v *uint64) uint64 { return conv.Value(v) }
+
+// Uint64Slice converts a slice of uint64 values into a slice of uint64 pointers.
+//
+// Deprecated: use [conv.PointerSlice] instead.
+func Uint64Slice(src []uint64) []*uint64 { return conv.PointerSlice(src) }
+
+// Uint64ValueSlice converts a slice of uint64 pointers into a slice of uint64 values.
+//
+// Deprecated: use [conv.ValueSlice] instead.
+func Uint64ValueSlice(src []*uint64) []uint64 { return conv.ValueSlice(src) }
+
+// Uint64Map converts a string map of uint64 values into a string map of uint64 pointers.
+//
+// Deprecated: use [conv.PointerMap] instead.
+func Uint64Map(src map[string]uint64) map[string]*uint64 { return conv.PointerMap(src) }
+
+// Uint64ValueMap converts a string map of uint64 pointers into a string map of uint64 values.
+//
+// Deprecated: use [conv.ValueMap] instead.
+func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { return conv.ValueMap(src) }
+
+// Float32 returns a pointer to the float32 value passed in.
+//
+// Deprecated: use [conv.Pointer] instead.
+func Float32(v float32) *float32 { return conv.Pointer(v) }
+
+// Float32Value returns the value of the float32 pointer passed in, or 0 if the pointer is nil.
+//
+// Deprecated: use [conv.Value] instead.
+func Float32Value(v *float32) float32 { return conv.Value(v) }
+
+// Float32Slice converts a slice of float32 values into a slice of float32 pointers.
+//
+// Deprecated: use [conv.PointerSlice] instead.
+func Float32Slice(src []float32) []*float32 { return conv.PointerSlice(src) }
+
+// Float32ValueSlice converts a slice of float32 pointers into a slice of float32 values.
+//
+// Deprecated: use [conv.ValueSlice] instead.
+func Float32ValueSlice(src []*float32) []float32 { return conv.ValueSlice(src) }
+
+// Float32Map converts a string map of float32 values into a string map of float32 pointers.
+//
+// Deprecated: use [conv.PointerMap] instead.
+func Float32Map(src map[string]float32) map[string]*float32 { return conv.PointerMap(src) }
+
+// Float32ValueMap converts a string map of float32 pointers into a string map of float32 values.
+//
+// Deprecated: use [conv.ValueMap] instead.
+func Float32ValueMap(src map[string]*float32) map[string]float32 { return conv.ValueMap(src) }
+
+// Float64 returns a pointer to the float64 value passed in.
+//
+// Deprecated: use [conv.Pointer] instead.
+func Float64(v float64) *float64 { return conv.Pointer(v) }
+
+// Float64Value returns the value of the float64 pointer passed in, or 0 if the pointer is nil.
+//
+// Deprecated: use [conv.Value] instead.
+func Float64Value(v *float64) float64 { return conv.Value(v) }
+
+// Float64Slice converts a slice of float64 values into a slice of float64 pointers.
+//
+// Deprecated: use [conv.PointerSlice] instead.
+func Float64Slice(src []float64) []*float64 { return conv.PointerSlice(src) }
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of float64 values.
+//
+// Deprecated: use [conv.ValueSlice] instead.
+func Float64ValueSlice(src []*float64) []float64 { return conv.ValueSlice(src) }
+
+// Float64Map converts a string map of float64 values into a string map of float64 pointers.
+//
+// Deprecated: use [conv.PointerMap] instead.
+func Float64Map(src map[string]float64) map[string]*float64 { return conv.PointerMap(src) }
+
+// Float64ValueMap converts a string map of float64 pointers into a string map of float64 values.
+//
+// Deprecated: use [conv.ValueMap] instead.
+func Float64ValueMap(src map[string]*float64) map[string]float64 { return conv.ValueMap(src) }
+
+// Time returns a pointer to the time.Time value passed in.
+//
+// Deprecated: use [conv.Pointer] instead.
+func Time(v time.Time) *time.Time { return conv.Pointer(v) }
+
+// TimeValue returns the value of the time.Time pointer passed in, or time.Time{} if the pointer is nil.
+//
+// Deprecated: use [conv.Value] instead.
+func TimeValue(v *time.Time) time.Time { return conv.Value(v) }
+
+// TimeSlice converts a slice of time.Time values into a slice of time.Time pointers.
+//
+// Deprecated: use [conv.PointerSlice] instead.
+func TimeSlice(src []time.Time) []*time.Time { return conv.PointerSlice(src) } + +// TimeValueSlice converts a slice of time.Time pointers into a slice of time.Time values +// +// Deprecated: use [conv.ValueSlice] instead. +func TimeValueSlice(src []*time.Time) []time.Time { return conv.ValueSlice(src) } + +// TimeMap converts a string map of time.Time values into a string map of time.Time pointers. +// +// Deprecated: use [conv.PointerMap] instead. +func TimeMap(src map[string]time.Time) map[string]*time.Time { return conv.PointerMap(src) } + +// TimeValueMap converts a string map of time.Time pointers into a string map of time.Time values. +// +// Deprecated: use [conv.ValueMap] instead. +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { return conv.ValueMap(src) } diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go new file mode 100644 index 00000000000..b54b57478af --- /dev/null +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -0,0 +1,47 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package swag contains a bunch of helper functions for go-openapi and go-swagger projects. +// +// You may also use it standalone for your projects. +// +// NOTE: all features that used to be exposed as package-level members (constants, variables, +// functions and types) are now deprecated and are superseded by equivalent features in +// more specialized sub-packages. +// Moving forward, no additional feature will be added to the [swag] API directly at the root package level, +// which remains there for backward-compatibility purposes. +// +// Child modules will continue to evolve or some new ones may be added in the future. +// +// # Modules +// +// - [cmdutils] utilities to work with CLIs +// +// - [conv] type conversion utilities +// +// - [fileutils] file utilities +// +// - [jsonname] JSON utilities +// +// - [jsonutils] JSON utilities +// +// - [loading] file loading +// +// - [mangling] safe name generation +// +// - [netutils] networking utilities +// +// - [stringutils] `string` utilities +// +// - [typeutils] `go` types utilities +// +// - [yamlutils] YAML utilities +// +// # Dependencies +// +// This repo has a few dependencies outside of the standard library: +// +// - YAML utilities depend on [go.yaml.in/yaml/v3] +package swag + +//go:generate mockery diff --git a/vendor/github.com/go-openapi/swag/fileutils/LICENSE b/vendor/github.com/go-openapi/swag/fileutils/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/fileutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
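For orientation, here is a minimal sketch of how the generic conv helpers vendored above fit together. It is illustrative only, not part of the patch, and assumes nothing beyond the APIs visible in this diff (conv.Pointer, conv.Value, conv.ConvertInteger, conv.FormatFloat):

package main

import (
	"fmt"

	"github.com/go-openapi/swag/conv"
)

func main() {
	// Pointer/Value round-trip: Value returns the zero value for a nil pointer.
	answer := conv.Pointer(42) // *int
	fmt.Println(conv.Value(answer)) // 42
	var missing *int
	fmt.Println(conv.Value(missing)) // 0

	// One generic converter replaces the per-type ConvertInt16, ConvertUint32, ... helpers.
	n, err := conv.ConvertInteger[int16]("123")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(n) // 123

	// FormatFloat derives the bit size from the argument type (see sizeof.go above).
	fmt.Println(conv.FormatFloat(float32(2.5))) // 2.5
}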
diff --git a/vendor/github.com/go-openapi/swag/fileutils/doc.go b/vendor/github.com/go-openapi/swag/fileutils/doc.go
new file mode 100644
index 00000000000..859a200d841
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/fileutils/doc.go
@@ -0,0 +1,10 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+// Package fileutils exposes utilities to deal with files and paths.
+//
+// Currently, this includes:
+// - [File] to represent an abstraction of an uploaded file.
+// For instance, this is used by [github.com/go-openapi/runtime.File].
+// - path search utilities (e.g. finding packages in the Go search path)
+package fileutils
diff --git a/vendor/github.com/go-openapi/swag/fileutils/file.go b/vendor/github.com/go-openapi/swag/fileutils/file.go
new file mode 100644
index 00000000000..5ad4cfaeafa
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/fileutils/file.go
@@ -0,0 +1,22 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package fileutils
+
+import "mime/multipart"
+
+// File represents an uploaded file.
+type File struct {
+	Data   multipart.File
+	Header *multipart.FileHeader
+}
+
+// Read bytes from the file
+func (f *File) Read(p []byte) (n int, err error) {
+	return f.Data.Read(p)
+}
+
+// Close the file
+func (f *File) Close() error {
+	return f.Data.Close()
+}
diff --git a/vendor/github.com/go-openapi/swag/fileutils/path.go b/vendor/github.com/go-openapi/swag/fileutils/path.go
new file mode 100644
index 00000000000..dd09f690bf8
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/fileutils/path.go
@@ -0,0 +1,52 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package fileutils
+
+import (
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+)
+
+// GOPATHKey represents the env key for gopath
+const GOPATHKey = "GOPATH"
+
+// FindInSearchPath finds a package in a provided list of paths
+func FindInSearchPath(searchPath, pkg string) string {
+	pathsList := filepath.SplitList(searchPath)
+	for _, path := range pathsList {
+		if evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", pkg)); err == nil {
+			if _, err := os.Stat(evaluatedPath); err == nil {
+				return evaluatedPath
+			}
+		}
+	}
+	return ""
+}
+
+// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT search path.
+//
+// Deprecated: this function is no longer relevant with modern Go.
+// It uses [runtime.GOROOT] under the hood, which is deprecated as of go1.24.
+func FindInGoSearchPath(pkg string) string {
+	return FindInSearchPath(FullGoSearchPath(), pkg)
+}
+
+// FullGoSearchPath gets the search paths for finding packages.
+//
+// Deprecated: this function is no longer relevant with modern Go.
+// It uses [runtime.GOROOT] under the hood, which is deprecated as of go1.24.
+func FullGoSearchPath() string {
+	allPaths := os.Getenv(GOPATHKey)
+	if allPaths == "" {
+		allPaths = filepath.Join(os.Getenv("HOME"), "go")
+	}
+	if allPaths != "" {
+		allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":")
+	} else {
+		allPaths = runtime.GOROOT()
+	}
+	return allPaths
+}
diff --git a/vendor/github.com/go-openapi/swag/fileutils_iface.go b/vendor/github.com/go-openapi/swag/fileutils_iface.go
new file mode 100644
index 00000000000..f3e79a0e4bc
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/fileutils_iface.go
@@ -0,0 +1,33 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package swag
+
+import "github.com/go-openapi/swag/fileutils"
+
+// GOPATHKey represents the env key for gopath
+//
+// Deprecated: use [fileutils.GOPATHKey] instead.
+const GOPATHKey = fileutils.GOPATHKey
+
+// File represents an uploaded file.
+//
+// Deprecated: use [fileutils.File] instead.
+type File = fileutils.File
+
+// FindInSearchPath finds a package in a provided list of paths.
+//
+// Deprecated: use [fileutils.FindInSearchPath] instead.
+func FindInSearchPath(searchPath, pkg string) string {
+	return fileutils.FindInSearchPath(searchPath, pkg)
+}
+
+// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT search path.
+//
+// Deprecated: use [fileutils.FindInGoSearchPath] instead.
+func FindInGoSearchPath(pkg string) string { return fileutils.FindInGoSearchPath(pkg) }
+
+// FullGoSearchPath gets the search paths for finding packages.
+//
+// Deprecated: use [fileutils.FullGoSearchPath] instead.
+func FullGoSearchPath() string { return fileutils.FullGoSearchPath() }
diff --git a/vendor/github.com/go-openapi/swag/go.work b/vendor/github.com/go-openapi/swag/go.work
new file mode 100644
index 00000000000..1e537f0749b
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/go.work
@@ -0,0 +1,20 @@
+use (
+	.
+	./cmdutils
+	./conv
+	./fileutils
+	./jsonname
+	./jsonutils
+	./jsonutils/adapters/easyjson
+	./jsonutils/adapters/testintegration
+	./jsonutils/adapters/testintegration/benchmarks
+	./jsonutils/fixtures_test
+	./loading
+	./mangling
+	./netutils
+	./stringutils
+	./typeutils
+	./yamlutils
+)
+
+go 1.24.0
diff --git a/vendor/github.com/go-openapi/swag/go.work.sum b/vendor/github.com/go-openapi/swag/go.work.sum
new file mode 100644
index 00000000000..c1308cafa67
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/go.work.sum
@@ -0,0 +1,7 @@
+github.com/go-openapi/testify/v2 v2.0.1/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
+golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
+golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
diff --git a/vendor/github.com/go-openapi/swag/jsonname/LICENSE b/vendor/github.com/go-openapi/swag/jsonname/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/jsonname/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
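Likewise, a short usage sketch for the fileutils helpers vendored above; the package path being looked up is an arbitrary example, and only APIs shown in this diff are used:

package main

import (
	"fmt"
	"os"

	"github.com/go-openapi/swag/fileutils"
)

func main() {
	// FindInSearchPath probes each entry of the list for <entry>/src/<pkg>,
	// resolving symlinks, and returns the first existing directory, or "".
	searchPath := os.Getenv(fileutils.GOPATHKey)
	if dir := fileutils.FindInSearchPath(searchPath, "github.com/go-openapi/swag"); dir != "" {
		fmt.Println("found package at:", dir)
	} else {
		fmt.Println("package not found under $GOPATH/src")
	}
}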
diff --git a/vendor/github.com/go-openapi/swag/jsonname/doc.go b/vendor/github.com/go-openapi/swag/jsonname/doc.go new file mode 100644 index 00000000000..79232eaca47 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonname/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package jsonname is a provider of json property names from go properties. +package jsonname diff --git a/vendor/github.com/go-openapi/swag/jsonname/name_provider.go b/vendor/github.com/go-openapi/swag/jsonname/name_provider.go new file mode 100644 index 00000000000..8eaf1bece8d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonname/name_provider.go @@ -0,0 +1,138 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package jsonname + +import ( + "reflect" + "strings" + "sync" +) + +// DefaultJSONNameProvider is the default cache for types. +var DefaultJSONNameProvider = NewNameProvider() + +// NameProvider represents an object capable of translating from go property names +// to json property names. +// +// This type is thread-safe. +// +// See [github.com/go-openapi/jsonpointer.Pointer] for an example. +type NameProvider struct { + lock *sync.Mutex + index map[reflect.Type]nameIndex +} + +type nameIndex struct { + jsonNames map[string]string + goNames map[string]string +} + +// NewNameProvider creates a new name provider +func NewNameProvider() *NameProvider { + return &NameProvider{ + lock: &sync.Mutex{}, + index: make(map[reflect.Type]nameIndex), + } +} + +func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) { + for i := 0; i < tpe.NumField(); i++ { + targetDes := tpe.Field(i) + + if targetDes.PkgPath != "" { // unexported + continue + } + + if targetDes.Anonymous { // walk embedded structures tree down first + buildnameIndex(targetDes.Type, idx, reverseIdx) + continue + } + + if tag := targetDes.Tag.Get("json"); tag != "" { + + parts := strings.Split(tag, ",") + if len(parts) == 0 { + continue + } + + nm := parts[0] + if nm == "-" { + continue + } + if nm == "" { // empty string means we want to use the Go name + nm = targetDes.Name + } + + idx[nm] = targetDes.Name + reverseIdx[targetDes.Name] = nm + } + } +} + +func newNameIndex(tpe reflect.Type) nameIndex { + var idx = make(map[string]string, tpe.NumField()) + var reverseIdx = make(map[string]string, tpe.NumField()) + + buildnameIndex(tpe, idx, reverseIdx) + return nameIndex{jsonNames: idx, goNames: reverseIdx} +} + +// GetJSONNames gets all the json property names for a type +func (n *NameProvider) GetJSONNames(subject any) []string { + n.lock.Lock() + defer n.lock.Unlock() + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + + res := make([]string, 0, len(names.jsonNames)) + for k := range names.jsonNames { + res = append(res, k) + } + return res +} + +// GetJSONName gets the json name for a go property name +func (n *NameProvider) GetJSONName(subject any, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetJSONNameForType(tpe, name) +} + +// GetJSONNameForType gets the json name for a go property name on a given type +func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) { + n.lock.Lock() + defer n.lock.Unlock() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := 
names.goNames[name] + return nme, ok +} + +// GetGoName gets the go name for a json property name +func (n *NameProvider) GetGoName(subject any, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetGoNameForType(tpe, name) +} + +// GetGoNameForType gets the go name for a given type for a json property name +func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) { + n.lock.Lock() + defer n.lock.Unlock() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := names.jsonNames[name] + return nme, ok +} + +func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex { + names := newNameIndex(tpe) + n.index[tpe] = names + return names +} diff --git a/vendor/github.com/go-openapi/swag/jsonname_iface.go b/vendor/github.com/go-openapi/swag/jsonname_iface.go new file mode 100644 index 00000000000..303a007f6f4 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonname_iface.go @@ -0,0 +1,24 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import ( + "github.com/go-openapi/swag/jsonname" +) + +// DefaultJSONNameProvider is the default cache for types +// +// Deprecated: use [jsonname.DefaultJSONNameProvider] instead. +var DefaultJSONNameProvider = jsonname.DefaultJSONNameProvider + +// NameProvider represents an object capable of translating from go property names +// to json property names. +// +// Deprecated: use [jsonname.NameProvider] instead. +type NameProvider = jsonname.NameProvider + +// NewNameProvider creates a new name provider +// +// Deprecated: use [jsonname.NewNameProvider] instead. +func NewNameProvider() *NameProvider { return jsonname.NewNameProvider() } diff --git a/vendor/github.com/go-openapi/swag/jsonutils/LICENSE b/vendor/github.com/go-openapi/swag/jsonutils/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/jsonutils/README.md b/vendor/github.com/go-openapi/swag/jsonutils/README.md new file mode 100644 index 00000000000..d745cdb466e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/README.md @@ -0,0 +1,108 @@ + # jsonutils + +`jsonutils` exposes a few tools to work with JSON: + +- a fast, simple `Concat` to concatenate (not merge) JSON objects and arrays +- `FromDynamicJSON` to convert a data structure into a "dynamic JSON" data structure +- `ReadJSON` and `WriteJSON` behave like `json.Unmarshal` and `json.Marshal`, + with the ability to use another underlying serialization library through an `Adapter` + configured at runtime +- a `JSONMapSlice` structure that may be used to store JSON objects with the order of keys maintained + +## Dynamic JSON + +We call "dynamic JSON" the go data structure that results from unmarshaling JSON like this: + +```go + var value any + jsonBytes := `{"a": 1, ... 
}`
+	_ = json.Unmarshal([]byte(jsonBytes), &value)
+```
+
+In this configuration, the standard library mappings are as follows:
+
+| JSON      | go               |
+|-----------|------------------|
+| `number`  | `float64`        |
+| `string`  | `string`         |
+| `boolean` | `bool`           |
+| `null`    | `nil`            |
+| `object`  | `map[string]any` |
+| `array`   | `[]any`          |
+
+## Map slices
+
+When using `JSONMapSlice`, the ordering of keys is ensured by replacing
+mappings to `map[string]any` by a `JSONMapSlice`, which is an (ordered)
+slice of `JSONMapItem`s.
+
+Notice that a similar feature is available for YAML (see [`yamlutils`](../yamlutils)),
+with a `YAMLMapSlice` type based on the `JSONMapSlice`.
+
+`JSONMapSlice` is similar to an ordered map, but the keys are not retrieved
+in constant time.
+
+Another difference with the above standard mappings is that numbers don't always map
+to a `float64`: if the value is a JSON integer, it unmarshals to `int64`.
+
+See also [some examples](https://pkg.go.dev/github.com/go-openapi/swag/jsonutils#pkg-examples).
+
+## Adapters
+
+`ReadJSON`, `WriteJSON` and `FromDynamicJSON` (which combines the other two)
+are wrappers on top of `json.Unmarshal` and `json.Marshal`.
+
+By default, the adapter merely wraps the standard library.
+
+The adapter may be used to register other JSON serialization libraries,
+possibly several at the same time.
+
+If the value passed is identified as an "ordered map" (i.e. it implements `ifaces.Ordered`
+or `ifaces.SetOrdered`), the adapter favors the "ordered" JSON behavior and tries to
+find a registered implementation that supports ordered keys in objects.
+
+Our standard library implementation supports this.
+
+As of `v0.25.0`, we support through such an adapter the popular `mailru/easyjson`
+library, which kicks in when the passed values support the `easyjson.Unmarshaler`
+or `easyjson.Marshaler` interfaces.
+
+In the future, we plan to add support for more such libraries from the competitive
+go JSON serializer scene.
+
+## Registering an adapter
+
+Several adapters are available in package `github.com/go-openapi/swag/jsonutils/adapters`.
+
+Each adapter is an independent go module, so you only pull in its dependencies if you import it.
+
+At this moment we provide:
+* `stdlib`: JSON adapter based on the standard library
+* `easyjson`: JSON adapter based on `github.com/mailru/easyjson`
+
+The adapters provide the basic `Marshal` and `Unmarshal` capabilities, plus an implementation
+of the `MapSlice` pattern.
+
+You may also build your own adapter for your specific use case. An adapter is not required to implement
+all capabilities.
+
+Every adapter comes with a `Register` function, possibly with some options, to register the adapter
+with a global registry.
+
+For example, to enable `easyjson` to be used in `ReadJSON` and `WriteJSON`, you would write something like:
+
+```go
+	import (
+		"github.com/go-openapi/swag/jsonutils/adapters"
+		easyjson "github.com/go-openapi/swag/jsonutils/adapters/easyjson/json"
+	)
+
+	func init() {
+		easyjson.Register(adapters.Registry)
+	}
+```
+
+You may register several adapters. In this case, capability matching is evaluated from the last
+registered adapter backwards (LIFO).
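+
+When nothing else is registered, the default stdlib adapter handles all calls. As a minimal
+sketch (assuming only the defaults), reading and writing a document through a `JSONMapSlice`
+keeps the order of keys:
+
+```go
+	import (
+		"fmt"
+
+		"github.com/go-openapi/swag/jsonutils"
+	)
+
+	func main() {
+		var doc jsonutils.JSONMapSlice
+		_ = jsonutils.ReadJSON([]byte(`{"b": 1, "a": 2}`), &doc)
+
+		out, _ := jsonutils.WriteJSON(doc)
+		fmt.Println(string(out)) // {"b":1,"a":2} -- key order maintained
+	}
+```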
+
+## [Benchmarks](./adapters/testintegration/benchmarks/README.md)
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/doc.go
new file mode 100644
index 00000000000..76d3898fca5
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/doc.go
@@ -0,0 +1,8 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+// Package adapters exposes a registry of adapters to multiple
+// JSON serialization libraries.
+//
+// All interfaces are defined in package [ifaces].
+package adapters
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/doc.go
new file mode 100644
index 00000000000..1fd43a1fad5
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/doc.go
@@ -0,0 +1,5 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+// Package ifaces exposes all interfaces to work with adapters.
+package ifaces
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/ifaces.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/ifaces.go
new file mode 100644
index 00000000000..7805e5e5e39
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/ifaces.go
@@ -0,0 +1,84 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package ifaces
+
+import (
+	_ "encoding/json" // for documentation purposes
+	"iter"
+)
+
+// Ordered knows how to iterate over the (key,value) pairs of a JSON object.
+type Ordered interface {
+	OrderedItems() iter.Seq2[string, any]
+}
+
+// SetOrdered knows how to append or update the keys of a JSON object,
+// given an iterator over (key,value) pairs.
+//
+// If the provided iterator is nil then the receiver should be set to nil.
+type SetOrdered interface {
+	SetOrderedItems(iter.Seq2[string, any])
+}
+
+// OrderedMap represents a JSON object (i.e. like a map[string]any),
+// and knows how to serialize and deserialize JSON with the order of keys maintained.
+type OrderedMap interface {
+	Ordered
+	SetOrdered
+
+	OrderedMarshalJSON() ([]byte, error)
+	OrderedUnmarshalJSON([]byte) error
+}
+
+// MarshalAdapter behaves like the standard library [json.Marshal].
+type MarshalAdapter interface {
+	Poolable
+
+	Marshal(any) ([]byte, error)
+}
+
+// OrderedMarshalAdapter behaves like the standard library [json.Marshal], preserving the order of keys in objects.
+type OrderedMarshalAdapter interface {
+	Poolable
+
+	OrderedMarshal(Ordered) ([]byte, error)
+}
+
+// UnmarshalAdapter behaves like the standard library [json.Unmarshal].
+type UnmarshalAdapter interface {
+	Poolable
+
+	Unmarshal([]byte, any) error
+}
+
+// OrderedUnmarshalAdapter behaves like the standard library [json.Unmarshal], preserving the order of keys in objects.
+type OrderedUnmarshalAdapter interface {
+	Poolable
+
+	OrderedUnmarshal([]byte, SetOrdered) error
+}
+
+// Adapter exposes an interface like the standard [json] library.
+type Adapter interface {
+	MarshalAdapter
+	UnmarshalAdapter
+
+	OrderedAdapter
+}
+
+// OrderedAdapter exposes interfaces to process JSON and keep the order of object keys.
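+//
+// The [Ordered] values consumed here can be as simple as a slice of key/value
+// pairs. For illustration, a sketch with a hypothetical Pairs type (mirroring
+// the MapSlice implementations elsewhere in this module):
+//
+//	func (p Pairs) OrderedItems() iter.Seq2[string, any] {
+//		return func(yield func(string, any) bool) {
+//			for _, kv := range p {
+//				if !yield(kv.Key, kv.Value) {
+//					return
+//				}
+//			}
+//		}
+//	}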
+type OrderedAdapter interface {
+	OrderedMarshalAdapter
+	OrderedUnmarshalAdapter
+	NewOrderedMap(capacity int) OrderedMap
+}
+
+// Poolable is implemented by adapters whose instances may be recycled through a pool.
+type Poolable interface {
+	// Redeem returns this instance to its pool, for [Adapter] s that are allocated from a pool.
+	// The [Adapter] must not be used after calling Redeem.
+	Redeem()
+
+	// Reset the state of the [Adapter], if any.
+	Reset()
+}
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/registry_iface.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/registry_iface.go
new file mode 100644
index 00000000000..2d6c69f4e60
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/registry_iface.go
@@ -0,0 +1,91 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package ifaces
+
+import (
+	"strings"
+)
+
+// Capability indicates what a JSON adapter is capable of.
+type Capability uint8
+
+const (
+	CapabilityMarshalJSON Capability = 1 << iota
+	CapabilityUnmarshalJSON
+	CapabilityOrderedMarshalJSON
+	CapabilityOrderedUnmarshalJSON
+	CapabilityOrderedMap
+)
+
+func (c Capability) String() string {
+	switch c {
+	case CapabilityMarshalJSON:
+		return "MarshalJSON"
+	case CapabilityUnmarshalJSON:
+		return "UnmarshalJSON"
+	case CapabilityOrderedMarshalJSON:
+		return "OrderedMarshalJSON"
+	case CapabilityOrderedUnmarshalJSON:
+		return "OrderedUnmarshalJSON"
+	case CapabilityOrderedMap:
+		return "OrderedMap"
+	default:
+		return ""
+	}
+}
+
+// Capabilities holds several unitary capability flags.
+type Capabilities uint8
+
+// Has reports whether some capability flag is enabled.
+func (c Capabilities) Has(capability Capability) bool {
+	return Capability(c)&capability > 0
+}
+
+func (c Capabilities) String() string {
+	var w strings.Builder
+
+	first := true
+	for _, capability := range []Capability{
+		CapabilityMarshalJSON,
+		CapabilityUnmarshalJSON,
+		CapabilityOrderedMarshalJSON,
+		CapabilityOrderedUnmarshalJSON,
+		CapabilityOrderedMap,
+	} {
+		if c.Has(capability) {
+			if !first {
+				w.WriteByte('|')
+			} else {
+				first = false
+			}
+			w.WriteString(capability.String())
+		}
+	}
+
+	return w.String()
+}
+
+const (
+	AllCapabilities Capabilities = Capabilities(uint8(CapabilityMarshalJSON) |
+		uint8(CapabilityUnmarshalJSON) |
+		uint8(CapabilityOrderedMarshalJSON) |
+		uint8(CapabilityOrderedUnmarshalJSON) |
+		uint8(CapabilityOrderedMap))
+
+	AllUnorderedCapabilities Capabilities = Capabilities(uint8(CapabilityMarshalJSON) | uint8(CapabilityUnmarshalJSON))
+)
+
+// RegistryEntry describes how any given adapter registers its capabilities to the [Registrar].
+type RegistryEntry struct {
+	Who         string
+	What        Capabilities
+	Constructor func() Adapter
+	Support     func(what Capability, value any) bool
+}
+
+// Registrar is a type that knows how to keep registration calls from adapters.
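+//
+// It is implemented by the Registrar type in the parent adapters package;
+// each adapter's Register function calls RegisterFor on it to advertise
+// its capabilities.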
+type Registrar interface {
+	RegisterFor(RegistryEntry)
+}
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/registry.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/registry.go
new file mode 100644
index 00000000000..3062acaff26
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/registry.go
@@ -0,0 +1,229 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package adapters
+
+import (
+	"fmt"
+	"reflect"
+	"slices"
+	"sync"
+
+	"github.com/go-openapi/swag/jsonutils/adapters/ifaces"
+	stdlib "github.com/go-openapi/swag/jsonutils/adapters/stdlib/json"
+)
+
+// Registry holds the global registry for registered adapters.
+var Registry = NewRegistrar()
+
+var (
+	defaultRegistered = stdlib.Register
+
+	_ ifaces.Registrar = &Registrar{}
+)
+
+type registryError string
+
+func (e registryError) Error() string {
+	return string(e)
+}
+
+// ErrRegistry indicates an error returned by the [Registrar].
+var ErrRegistry registryError = "JSON adapters registry error"
+
+type registry []*ifaces.RegistryEntry
+
+// Registrar holds registered [ifaces.Adapter] entries for different serialization capabilities.
+//
+// Internally, it maintains a cache for data types that favor a given adapter.
+type Registrar struct {
+	marshalerRegistry          registry
+	unmarshalerRegistry        registry
+	orderedMarshalerRegistry   registry
+	orderedUnmarshalerRegistry registry
+	orderedMapRegistry         registry
+
+	gmx sync.RWMutex
+
+	// cache indexed by value type, so we don't have to look up the registries every time
+	marshalerCache          map[reflect.Type]*ifaces.RegistryEntry
+	unmarshalerCache        map[reflect.Type]*ifaces.RegistryEntry
+	orderedMarshalerCache   map[reflect.Type]*ifaces.RegistryEntry
+	orderedUnmarshalerCache map[reflect.Type]*ifaces.RegistryEntry
+	orderedMapCache         map[reflect.Type]*ifaces.RegistryEntry
+}
+
+func NewRegistrar() *Registrar {
+	r := &Registrar{}
+
+	r.marshalerRegistry = make(registry, 0, 1)
+	r.unmarshalerRegistry = make(registry, 0, 1)
+	r.orderedMarshalerRegistry = make(registry, 0, 1)
+	r.orderedUnmarshalerRegistry = make(registry, 0, 1)
+	r.orderedMapRegistry = make(registry, 0, 1)
+
+	r.marshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry)
+	r.unmarshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry)
+	r.orderedMarshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry)
+	r.orderedUnmarshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry)
+	r.orderedMapCache = make(map[reflect.Type]*ifaces.RegistryEntry)
+
+	defaultRegistered(r)
+
+	return r
+}
+
+// ClearCache resets the internal type cache.
+func (r *Registrar) ClearCache() {
+	r.gmx.Lock()
+	r.clearCache()
+	r.gmx.Unlock()
+}
+
+// Reset the [Registrar] to its defaults.
+func (r *Registrar) Reset() {
+	r.gmx.Lock()
+	r.clearCache()
+	r.marshalerRegistry = r.marshalerRegistry[:0]
+	r.unmarshalerRegistry = r.unmarshalerRegistry[:0]
+	r.orderedMarshalerRegistry = r.orderedMarshalerRegistry[:0]
+	r.orderedUnmarshalerRegistry = r.orderedUnmarshalerRegistry[:0]
+	r.orderedMapRegistry = r.orderedMapRegistry[:0]
+	r.gmx.Unlock()
+
+	defaultRegistered(r)
+}
+
+// RegisterFor registers an adapter for some JSON capabilities.
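+//
+// Each selected capability list is updated by inserting the new entry at its head,
+// so the most recently registered adapter takes precedence (LIFO).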
+func (r *Registrar) RegisterFor(entry ifaces.RegistryEntry) { + r.gmx.Lock() + if entry.What.Has(ifaces.CapabilityMarshalJSON) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityMarshalJSON) + r.marshalerRegistry = slices.Insert(r.marshalerRegistry, 0, &e) + } + if entry.What.Has(ifaces.CapabilityUnmarshalJSON) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityUnmarshalJSON) + r.unmarshalerRegistry = slices.Insert(r.unmarshalerRegistry, 0, &e) + } + if entry.What.Has(ifaces.CapabilityOrderedMarshalJSON) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityOrderedMarshalJSON) + r.orderedMarshalerRegistry = slices.Insert(r.orderedMarshalerRegistry, 0, &e) + } + if entry.What.Has(ifaces.CapabilityOrderedUnmarshalJSON) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityOrderedUnmarshalJSON) + r.orderedUnmarshalerRegistry = slices.Insert(r.orderedUnmarshalerRegistry, 0, &e) + } + if entry.What.Has(ifaces.CapabilityOrderedMap) { + e := entry + e.What &= ifaces.Capabilities(ifaces.CapabilityOrderedMap) + r.orderedMapRegistry = slices.Insert(r.orderedMapRegistry, 0, &e) + } + r.gmx.Unlock() +} + +// AdapterFor returns an [ifaces.Adapter] that supports this capability for this type of value. +// +// The [ifaces.Adapter] may be redeemed to its pool using its Redeem() method, for adapters that support global +// pooling. When this is not the case, the redeem function is just a no-operation. +func (r *Registrar) AdapterFor(capability ifaces.Capability, value any) ifaces.Adapter { + entry := r.findFirstFor(capability, value) + if entry == nil { + return nil + } + + return entry.Constructor() +} + +func (r *Registrar) clearCache() { + clear(r.marshalerCache) + clear(r.unmarshalerCache) + clear(r.orderedMarshalerCache) + clear(r.orderedUnmarshalerCache) + clear(r.orderedMapCache) +} + +func (r *Registrar) findFirstFor(capability ifaces.Capability, value any) *ifaces.RegistryEntry { + switch capability { + case ifaces.CapabilityMarshalJSON: + return r.findFirstInRegistryFor(r.marshalerRegistry, r.marshalerCache, capability, value) + case ifaces.CapabilityUnmarshalJSON: + return r.findFirstInRegistryFor(r.unmarshalerRegistry, r.unmarshalerCache, capability, value) + case ifaces.CapabilityOrderedMarshalJSON: + return r.findFirstInRegistryFor(r.orderedMarshalerRegistry, r.orderedMarshalerCache, capability, value) + case ifaces.CapabilityOrderedUnmarshalJSON: + return r.findFirstInRegistryFor(r.orderedUnmarshalerRegistry, r.orderedUnmarshalerCache, capability, value) + case ifaces.CapabilityOrderedMap: + return r.findFirstInRegistryFor(r.orderedMapRegistry, r.orderedMapCache, capability, value) + default: + panic(fmt.Errorf("unsupported capability %d: %w", capability, ErrRegistry)) + } +} + +func (r *Registrar) findFirstInRegistryFor(reg registry, cache map[reflect.Type]*ifaces.RegistryEntry, capability ifaces.Capability, value any) *ifaces.RegistryEntry { + r.gmx.RLock() + if len(reg) > 1 { + if entry, ok := cache[reflect.TypeOf(value)]; ok { + // cache hit + r.gmx.RUnlock() + return entry + } + } + + for _, entry := range reg { + if !entry.Support(capability, value) { + continue + } + + r.gmx.RUnlock() + + // update the internal cache + r.gmx.Lock() + cache[reflect.TypeOf(value)] = entry + r.gmx.Unlock() + + return entry + } + + // no adapter found + r.gmx.RUnlock() + + return nil +} + +// MarshalAdapterFor returns the first adapter that knows how to Marshal this type of value. 
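+//
+// A minimal usage sketch (mirroring what WriteJSON in package jsonutils does,
+// including the guard against a nil result):
+//
+//	if adapter := MarshalAdapterFor(value); adapter != nil {
+//		defer adapter.Redeem()
+//
+//		return adapter.Marshal(value)
+//	}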
+func MarshalAdapterFor(value any) ifaces.MarshalAdapter { + return Registry.AdapterFor(ifaces.CapabilityMarshalJSON, value) +} + +// OrderedMarshalAdapterFor returns the first adapter that knows how to OrderedMarshal this type of value. +func OrderedMarshalAdapterFor(value ifaces.Ordered) ifaces.OrderedMarshalAdapter { + return Registry.AdapterFor(ifaces.CapabilityOrderedMarshalJSON, value) +} + +// UnmarshalAdapterFor returns the first adapter that knows how to Unmarshal this type of value. +func UnmarshalAdapterFor(value any) ifaces.UnmarshalAdapter { + return Registry.AdapterFor(ifaces.CapabilityUnmarshalJSON, value) +} + +// OrderedUnmarshalAdapterFor provides the first adapter that knows how to OrderedUnmarshal this type of value. +func OrderedUnmarshalAdapterFor(value ifaces.SetOrdered) ifaces.OrderedUnmarshalAdapter { + return Registry.AdapterFor(ifaces.CapabilityOrderedUnmarshalJSON, value) +} + +// NewOrderedMap provides the "ordered map" implementation provided by the registry. +func NewOrderedMap(capacity int) ifaces.OrderedMap { + var v any + adapter := Registry.AdapterFor(ifaces.CapabilityOrderedUnmarshalJSON, v) + if adapter == nil { + return nil + } + + defer adapter.Redeem() + return adapter.NewOrderedMap(capacity) +} + +func noopRedeemer() {} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/adapter.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/adapter.go new file mode 100644 index 00000000000..0213ff5c29f --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/adapter.go @@ -0,0 +1,115 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + stdjson "encoding/json" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" + "github.com/go-openapi/swag/typeutils" +) + +const sensibleBufferSize = 8192 + +type jsonError string + +func (e jsonError) Error() string { + return string(e) +} + +// ErrStdlib indicates that an error comes from the stdlib JSON adapter +var ErrStdlib jsonError = "error from the JSON adapter stdlib" + +var _ ifaces.Adapter = &Adapter{} + +type Adapter struct { +} + +// NewAdapter yields an [ifaces.Adapter] using the standard library. 
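+//
+// Callers that want to recycle allocations may prefer [BorrowAdapter], which
+// draws instances from a pool.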
+func NewAdapter() *Adapter { + return &Adapter{} +} + +func (a *Adapter) Marshal(value any) ([]byte, error) { + return stdjson.Marshal(value) +} + +func (a *Adapter) Unmarshal(data []byte, value any) error { + return stdjson.Unmarshal(data, value) +} + +func (a *Adapter) OrderedMarshal(value ifaces.Ordered) ([]byte, error) { + w := poolOfWriters.Borrow() + defer func() { + poolOfWriters.Redeem(w) + }() + + if typeutils.IsNil(value) { + w.RawString("null") + + return w.BuildBytes() + } + + w.RawByte('{') + first := true + for k, v := range value.OrderedItems() { + if first { + first = false + } else { + w.RawByte(',') + } + + w.String(k) + w.RawByte(':') + + switch val := v.(type) { + case ifaces.Ordered: + w.Raw(a.OrderedMarshal(val)) + default: + w.Raw(stdjson.Marshal(v)) + } + } + + w.RawByte('}') + + return w.BuildBytes() +} + +func (a *Adapter) OrderedUnmarshal(data []byte, value ifaces.SetOrdered) error { + var m MapSlice + if err := m.OrderedUnmarshalJSON(data); err != nil { + return err + } + + if typeutils.IsNil(m) { + // force input value to nil + value.SetOrderedItems(nil) + + return nil + } + + value.SetOrderedItems(m.OrderedItems()) + + return nil +} + +func (a *Adapter) NewOrderedMap(capacity int) ifaces.OrderedMap { + m := make(MapSlice, 0, capacity) + + return &m +} + +// Redeem the [Adapter] when it comes from a pool. +// +// The adapter becomes immediately unusable once redeemed. +func (a *Adapter) Redeem() { + if a == nil { + return + } + + RedeemAdapter(a) +} + +func (a *Adapter) Reset() { +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/doc.go new file mode 100644 index 00000000000..5ea1b440425 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package json implements an [ifaces.Adapter] using the standard library. 
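+//
+// This is the adapter registered by default with the adapters registry.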
+package json diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/lexer.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/lexer.go new file mode 100644 index 00000000000..b5aa1c7972e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/lexer.go @@ -0,0 +1,320 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + stdjson "encoding/json" + "errors" + "fmt" + "io" + "math" + "strconv" + + "github.com/go-openapi/swag/conv" +) + +type token struct { + stdjson.Token +} + +func (t token) String() string { + if t == invalidToken { + return "invalid token" + } + if t == eofToken { + return "EOF" + } + + return fmt.Sprintf("%v", t.Token) +} + +func (t token) Kind() tokenKind { + switch t.Token.(type) { + case nil: + return tokenNull + case stdjson.Delim: + return tokenDelim + case bool: + return tokenBool + case float64: + return tokenFloat + case stdjson.Number: + return tokenNumber + case string: + return tokenString + default: + return tokenUndef + } +} + +func (t token) Delim() byte { + r, ok := t.Token.(stdjson.Delim) + if !ok { + return 0 + } + + return byte(r) +} + +type tokenKind uint8 + +const ( + tokenUndef tokenKind = iota + tokenString + tokenNumber + tokenFloat + tokenBool + tokenNull + tokenDelim +) + +var ( + invalidToken = token{ + Token: stdjson.Token(struct{}{}), + } + + eofToken = token{ + Token: stdjson.Token(&struct{}{}), + } + + undefToken = token{ + Token: stdjson.Token(uint8(0)), + } +) + +// jlexer apes easyjson's jlexer, but uses the standard library decoder under the hood. +type jlexer struct { + buf *bytesReader + dec *stdjson.Decoder + err error + // current token + next token + // started bool +} + +type bytesReader struct { + buf []byte + offset int +} + +func (b *bytesReader) Reset() { + b.buf = nil + b.offset = 0 +} + +func (b *bytesReader) Read(p []byte) (int, error) { + if b.offset >= len(b.buf) { + return 0, io.EOF + } + + n := len(p) + buf := b.buf[b.offset:] + m := len(buf) + + if n >= m { + copy(p, buf) + b.offset += m + + return m, nil + } + + copy(p, buf[:n]) + b.offset += n + + return n, nil +} + +var _ io.Reader = &bytesReader{} + +func newLexer(data []byte) *jlexer { + l := &jlexer{ + // current: undefToken, + next: undefToken, + } + l.buf = &bytesReader{ + buf: data, + } + l.dec = stdjson.NewDecoder(l.buf) // unfortunately, cannot pool this + + return l +} + +func (l *jlexer) Reset() { + l.err = nil + l.next = undefToken + // leave l.dec and l.buf alone, since they are replaced at every Borrow +} + +func (l *jlexer) Error() error { + return l.err +} + +func (l *jlexer) SetErr(err error) { + l.err = err +} + +func (l *jlexer) Ok() bool { + return l.err == nil +} + +// NextToken consumes a token +func (l *jlexer) NextToken() token { + if !l.Ok() { + return invalidToken + } + + if l.next != undefToken { + next := l.next + l.next = undefToken + + return next + } + + return l.fetchToken() +} + +// PeekToken returns the next token without consuming it +func (l *jlexer) PeekToken() token { + if l.next == undefToken { + l.next = l.fetchToken() + } + + return l.next +} + +func (l *jlexer) Skip() { + _ = l.NextToken() +} + +func (l *jlexer) IsDelim(c byte) bool { + if !l.Ok() { + return false + } + + next := l.PeekToken() + if next.Kind() != tokenDelim { + return false + } + + if next.Delim() != c { + return false + } + + return true +} + +func (l *jlexer) IsNull() bool { + if !l.Ok() { + return 
false + } + + next := l.PeekToken() + + return next.Kind() == tokenNull +} + +func (l *jlexer) Delim(c byte) { + if !l.Ok() { + return + } + + tok := l.NextToken() + if tok.Kind() != tokenDelim { + l.err = fmt.Errorf("expected a delimiter token but got '%v': %w", tok, ErrStdlib) + + return + } + + if tok.Delim() != c { + l.err = fmt.Errorf("expected delimiter '%q' but got '%q': %w", c, tok.Delim(), ErrStdlib) + } +} + +func (l *jlexer) Null() { + if !l.Ok() { + return + } + + tok := l.NextToken() + if tok.Kind() != tokenNull { + l.err = fmt.Errorf("expected a null token but got '%v': %w", tok, ErrStdlib) + } +} + +func (l *jlexer) Number() any { + if !l.Ok() { + return 0 + } + + tok := l.NextToken() + + switch tok.Kind() { //nolint:exhaustive + case tokenNumber: + n := tok.Token.(stdjson.Number).String() + f, _ := strconv.ParseFloat(n, 64) + if conv.IsFloat64AJSONInteger(f) { + return int64(math.Trunc(f)) + } + + return f + + case tokenFloat: + f := tok.Token.(float64) + if conv.IsFloat64AJSONInteger(f) { + return int64(math.Trunc(f)) + } + + return f + + default: + l.err = fmt.Errorf("expected a number token but got '%v': %w", tok, ErrStdlib) + + return 0 + } +} + +func (l *jlexer) Bool() bool { + if !l.Ok() { + return false + } + + tok := l.NextToken() + if tok.Kind() != tokenBool { + l.err = fmt.Errorf("expected a bool token but got '%v': %w", tok, ErrStdlib) + + return false + } + + return tok.Token.(bool) +} + +func (l *jlexer) String() string { + if !l.Ok() { + return "" + } + + tok := l.NextToken() + if tok.Kind() != tokenString { + l.err = fmt.Errorf("expected a string token but got '%v': %w", tok, ErrStdlib) + + return "" + } + + return tok.Token.(string) +} + +// Commas and colons are elided. +func (l *jlexer) fetchToken() token { + jtok, err := l.dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + return eofToken + } + + l.err = errors.Join(err, ErrStdlib) + return invalidToken + } + + return token{Token: jtok} +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/ordered_map.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/ordered_map.go new file mode 100644 index 00000000000..54deef406f3 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/ordered_map.go @@ -0,0 +1,266 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + stdjson "encoding/json" + "fmt" + "iter" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" +) + +var _ ifaces.OrderedMap = &MapSlice{} + +// MapSlice represents a JSON object, with the order of keys maintained. +type MapSlice []MapItem + +func (s MapSlice) OrderedItems() iter.Seq2[string, any] { + return func(yield func(string, any) bool) { + for _, item := range s { + if !yield(item.Key, item.Value) { + return + } + } + } +} + +func (s *MapSlice) SetOrderedItems(items iter.Seq2[string, any]) { + if items == nil { + *s = nil + + return + } + + m := *s + if len(m) > 0 { + // update mode + idx := make(map[string]int, len(m)) + + for i, item := range m { + idx[item.Key] = i + } + + for k, v := range items { + idx, ok := idx[k] + if ok { + m[idx].Value = v + + continue + } + m = append(m, MapItem{Key: k, Value: v}) + } + + *s = m + + return + } + + for k, v := range items { + m = append(m, MapItem{Key: k, Value: v}) + } + + *s = m +} + +// MarshalJSON renders a [MapSlice] as JSON bytes, preserving the order of keys. 
+func (s MapSlice) MarshalJSON() ([]byte, error) { + return s.OrderedMarshalJSON() +} + +func (s MapSlice) OrderedMarshalJSON() ([]byte, error) { + w := poolOfWriters.Borrow() + defer func() { + poolOfWriters.Redeem(w) + }() + + s.marshalObject(w) + + return w.BuildBytes() // this clones data, so it's okay to redeem the writer and its buffer +} + +// UnmarshalJSON builds a [MapSlice] from JSON bytes, preserving the order of keys. +// +// Inner objects are unmarshaled as [MapSlice] slices and not map[string]any. +func (s *MapSlice) UnmarshalJSON(data []byte) error { + return s.OrderedUnmarshalJSON(data) +} + +func (s *MapSlice) OrderedUnmarshalJSON(data []byte) error { + l := poolOfLexers.Borrow(data) + defer func() { + poolOfLexers.Redeem(l) + }() + + s.unmarshalObject(l) + + return l.Error() +} + +func (s MapSlice) marshalObject(w *jwriter) { + if s == nil { + w.RawString("null") + + return + } + + w.RawByte('{') + + if len(s) == 0 { + w.RawByte('}') + + return + } + + s[0].marshalJSON(w) + + for i := 1; i < len(s); i++ { + w.RawByte(',') + s[i].marshalJSON(w) + } + + w.RawByte('}') +} + +func (s *MapSlice) unmarshalObject(in *jlexer) { + if in.IsNull() { + in.Skip() + + return + } + + in.Delim('{') // consume token + if !in.Ok() { + return + } + + result := make(MapSlice, 0) + + for in.Ok() && !in.IsDelim('}') { + var mi MapItem + + mi.unmarshalKeyValue(in) + result = append(result, mi) + } + + in.Delim('}') + + if !in.Ok() { + return + } + + *s = result +} + +// MapItem represents the value of a key in a JSON object held by [MapSlice]. +// +// Notice that [MapItem] should not be marshaled to or unmarshaled from JSON directly, +// use this type as part of a [MapSlice] when dealing with JSON bytes. +type MapItem struct { + Key string + Value any +} + +func (s MapItem) marshalJSON(w *jwriter) { + w.String(s.Key) + w.RawByte(':') + w.Raw(stdjson.Marshal(s.Value)) +} + +func (s *MapItem) unmarshalKeyValue(in *jlexer) { + key := in.String() // consume string + value := s.asInterface(in) // consume any value, including termination tokens '}' or ']' + + if !in.Ok() { + return + } + + s.Key = key + s.Value = value +} + +func (s *MapItem) unmarshalArray(in *jlexer) []any { + if in.IsNull() { + in.Skip() + + return nil + } + + in.Delim('[') // consume token + if !in.Ok() { + return nil + } + + ret := make([]any, 0) + + for in.Ok() && !in.IsDelim(']') { + ret = append(ret, s.asInterface(in)) + } + + in.Delim(']') + if !in.Ok() { + return nil + } + + return ret +} + +// asInterface is very much like [jlexer.Lexer.Interface], but unmarshals an object +// into a [MapSlice], not a map[string]any. +// +// We have to force parsing errors somehow, since [jlexer.Lexer] doesn't let us +// set a parsing error directly. 
+func (s *MapItem) asInterface(in *jlexer) any { + if !in.Ok() { + return nil + } + + tok := in.PeekToken() // look-ahead what the next token looks like + kind := tok.Kind() + + switch kind { + case tokenString: + return in.String() // consume string + + case tokenNumber, tokenFloat: + return in.Number() + + case tokenBool: + return in.Bool() + + case tokenNull: + in.Null() + + return nil + + case tokenDelim: + switch tok.Delim() { + case '{': // not consumed yet + ret := make(MapSlice, 0) + ret.unmarshalObject(in) // consumes the terminating '}' + + if in.Ok() { + return ret + } + + // lexer is in an error state: will exhaust + return nil + + case '[': // not consumed yet + return s.unmarshalArray(in) // consumes the terminating ']' + default: + in.SetErr(fmt.Errorf("unexpected delimiter: %v: %w", tok, ErrStdlib)) // force error + return nil + } + + case tokenUndef: + fallthrough + default: + if in.Ok() { + in.SetErr(fmt.Errorf("unexpected token: %v: %w", tok, ErrStdlib)) // force error + } + + return nil + } +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/pool.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/pool.go new file mode 100644 index 00000000000..709b97c3046 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/pool.go @@ -0,0 +1,143 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + "encoding/json" + "sync" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" +) + +type adaptersPool struct { + sync.Pool +} + +func (p *adaptersPool) Borrow() *Adapter { + return p.Get().(*Adapter) +} + +func (p *adaptersPool) BorrowIface() ifaces.Adapter { + return p.Get().(*Adapter) +} + +func (p *adaptersPool) Redeem(a *Adapter) { + p.Put(a) +} + +type writersPool struct { + sync.Pool +} + +func (p *writersPool) Borrow() *jwriter { + ptr := p.Get() + + jw := ptr.(*jwriter) + jw.Reset() + + return jw +} + +func (p *writersPool) Redeem(w *jwriter) { + p.Put(w) +} + +type lexersPool struct { + sync.Pool +} + +func (p *lexersPool) Borrow(data []byte) *jlexer { + ptr := p.Get() + + l := ptr.(*jlexer) + l.buf = poolOfReaders.Borrow(data) + l.dec = json.NewDecoder(l.buf) // cannot pool, not exposed by the encoding/json API + l.Reset() + + return l +} + +func (p *lexersPool) Redeem(l *jlexer) { + l.dec = nil + discard := l.buf + l.buf = nil + poolOfReaders.Redeem(discard) + p.Put(l) +} + +type readersPool struct { + sync.Pool +} + +func (p *readersPool) Borrow(data []byte) *bytesReader { + ptr := p.Get() + + b := ptr.(*bytesReader) + b.Reset() + b.buf = data + + return b +} + +func (p *readersPool) Redeem(b *bytesReader) { + p.Put(b) +} + +var ( + poolOfAdapters = &adaptersPool{ + Pool: sync.Pool{ + New: func() any { + return NewAdapter() + }, + }, + } + + poolOfWriters = &writersPool{ + Pool: sync.Pool{ + New: func() any { + return newJWriter() + }, + }, + } + + poolOfLexers = &lexersPool{ + Pool: sync.Pool{ + New: func() any { + return newLexer(nil) + }, + }, + } + + poolOfReaders = &readersPool{ + Pool: sync.Pool{ + New: func() any { + return &bytesReader{} + }, + }, + } +) + +// BorrowAdapter borrows an [Adapter] from the pool, recycling already allocated instances. +func BorrowAdapter() *Adapter { + return poolOfAdapters.Borrow() +} + +// BorrowAdapterIface borrows a stdlib [Adapter] and converts it directly +// to [ifaces.Adapter]. 
This is useful to avoid further allocations when +// translating the concrete type into an interface. +func BorrowAdapterIface() ifaces.Adapter { + return poolOfAdapters.BorrowIface() +} + +// RedeemAdapter redeems an [Adapter] to the pool, so it may be recycled. +func RedeemAdapter(a *Adapter) { + poolOfAdapters.Redeem(a) +} + +func RedeemAdapterIface(a ifaces.Adapter) { + concrete, ok := a.(*Adapter) + if ok { + poolOfAdapters.Redeem(concrete) + } +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/register.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/register.go new file mode 100644 index 00000000000..fc8818694ea --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/register.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" +) + +func Register(dispatcher ifaces.Registrar) { + t := reflect.TypeOf(Adapter{}) + dispatcher.RegisterFor( + ifaces.RegistryEntry{ + Who: fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()), + What: ifaces.AllCapabilities, + Constructor: BorrowAdapterIface, + Support: support, + }) +} + +func support(_ ifaces.Capability, _ any) bool { + return true +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/writer.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/writer.go new file mode 100644 index 00000000000..dc2325c1a30 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/writer.go @@ -0,0 +1,75 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + "bytes" + "encoding/json" + "strings" +) + +type jwriter struct { + buf *bytes.Buffer + err error +} + +func newJWriter() *jwriter { + buf := make([]byte, 0, sensibleBufferSize) + + return &jwriter{buf: bytes.NewBuffer(buf)} +} + +func (w *jwriter) Reset() { + w.buf.Reset() + w.err = nil +} + +func (w *jwriter) RawString(s string) { + if w.err != nil { + return + } + w.buf.WriteString(s) +} + +func (w *jwriter) Raw(b []byte, err error) { + if w.err != nil { + return + } + if err != nil { + w.err = err + return + } + + _, _ = w.buf.Write(b) +} + +func (w *jwriter) RawByte(c byte) { + if w.err != nil { + return + } + w.buf.WriteByte(c) +} + +var quoteReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`) + +func (w *jwriter) String(s string) { + if w.err != nil { + return + } + // escape quotes and \ + s = quoteReplacer.Replace(s) + + _ = w.buf.WriteByte('"') + json.HTMLEscape(w.buf, []byte(s)) + _ = w.buf.WriteByte('"') +} + +// BuildBytes returns a clone of the internal buffer. 
+func (w *jwriter) BuildBytes() ([]byte, error) { + if w.err != nil { + return nil, w.err + } + + return bytes.Clone(w.buf.Bytes()), nil +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/concat.go b/vendor/github.com/go-openapi/swag/jsonutils/concat.go new file mode 100644 index 00000000000..2068503af05 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/concat.go @@ -0,0 +1,92 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package jsonutils + +import ( + "bytes" +) + +// nullJSON represents a JSON object with null type +var nullJSON = []byte("null") + +const comma = byte(',') + +var closers map[byte]byte + +func init() { + closers = map[byte]byte{ + '{': '}', + '[': ']', + } +} + +// ConcatJSON concatenates multiple json objects or arrays efficiently. +// +// Note that [ConcatJSON] performs a very simple (and fast) concatenation +// operation: it does not attempt to merge objects. +func ConcatJSON(blobs ...[]byte) []byte { + if len(blobs) == 0 { + return nil + } + + last := len(blobs) - 1 + for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) { + // strips trailing null objects + last-- + if last < 0 { + // there was nothing but "null"s or nil... + return nil + } + } + if last == 0 { + return blobs[0] + } + + var opening, closing byte + var idx, a int + buf := bytes.NewBuffer(nil) + + for i, b := range blobs[:last+1] { + if b == nil || bytes.Equal(b, nullJSON) { + // a null object is in the list: skip it + continue + } + if len(b) > 0 && opening == 0 { // is this an array or an object? + opening, closing = b[0], closers[b[0]] + } + + if opening != '{' && opening != '[' { + continue // don't know how to concatenate non container objects + } + + const minLengthIfNotEmpty = 3 + if len(b) < minLengthIfNotEmpty { // yep empty but also the last one, so closing this thing + if i == last && a > 0 { + _ = buf.WriteByte(closing) // never returns err != nil + } + continue + } + + idx = 0 + if a > 0 { // we need to join with a comma for everything beyond the first non-empty item + _ = buf.WriteByte(comma) // never returns err != nil + idx = 1 // this is not the first or the last so we want to drop the leading bracket + } + + if i != last { // not the last one, strip brackets + _, _ = buf.Write(b[idx : len(b)-1]) // never returns err != nil + } else { // last one, strip only the leading bracket + _, _ = buf.Write(b[idx:]) + } + a++ + } + + // somehow it ended up being empty, so provide a default value + if buf.Len() == 0 && (opening == '{' || opening == '[') { + _ = buf.WriteByte(opening) // never returns err != nil + _ = buf.WriteByte(closing) + } + + return buf.Bytes() +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/doc.go new file mode 100644 index 00000000000..3926cc58d1b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/doc.go @@ -0,0 +1,7 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package jsonutils provides helpers to work with JSON. +// +// These utilities work with dynamic go structures to and from JSON. 
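+//
+// For example (a minimal sketch), converting a typed value into its "dynamic
+// JSON" form:
+//
+//	var dynamic any
+//	src := struct {
+//		A int `json:"a"`
+//	}{A: 1}
+//
+//	_ = FromDynamicJSON(src, &dynamic)
+//	// dynamic is now map[string]any{"a": float64(1)}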
+package jsonutils
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/json.go b/vendor/github.com/go-openapi/swag/jsonutils/json.go
new file mode 100644
index 00000000000..40753ce03fd
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/jsonutils/json.go
@@ -0,0 +1,116 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package jsonutils
+
+import (
+	"bytes"
+	"encoding/json"
+
+	"github.com/go-openapi/swag/jsonutils/adapters"
+	"github.com/go-openapi/swag/jsonutils/adapters/ifaces"
+)
+
+// WriteJSON marshals a data structure as JSON.
+//
+// The difference with [json.Marshal] is that it may check among several alternatives
+// to do so.
+//
+// See [adapters.Registrar] for more details about how to configure
+// multiple serialization alternatives.
+//
+// NOTE: to allow types that are [easyjson.Marshaler] s to use that route to process JSON,
+// you now need to register the adapter for easyjson at runtime.
+func WriteJSON(value any) ([]byte, error) {
+	if orderedMap, isOrdered := value.(ifaces.Ordered); isOrdered {
+		orderedMarshaler := adapters.OrderedMarshalAdapterFor(orderedMap)
+
+		if orderedMarshaler != nil {
+			defer orderedMarshaler.Redeem()
+
+			return orderedMarshaler.OrderedMarshal(orderedMap)
+		}
+
+		// no support found in registered adapters, fall back to the default (unordered) case
+	}
+
+	marshaler := adapters.MarshalAdapterFor(value)
+	if marshaler != nil {
+		defer marshaler.Redeem()
+
+		return marshaler.Marshal(value)
+	}
+
+	// no support found in registered adapters, fall back to the default standard library.
+	//
+	// This only happens when tinkering with the global registry of adapters, since the default handles all the above cases.
+	return json.Marshal(value) // Codecov ignore // this is a safeguard not easily simulated in tests
+}
+
+// ReadJSON unmarshals JSON data into a data structure.
+//
+// The difference with [json.Unmarshal] is that it may check among several alternatives
+// to do so.
+//
+// See [adapters.Registrar] for more details about how to configure
+// multiple serialization alternatives.
+//
+// NOTE: value must be a pointer.
+//
+// If the provided value implements [ifaces.SetOrdered], it is considered an "ordered map" and [ReadJSON]
+// will favor an adapter that supports the [ifaces.OrderedUnmarshalAdapter] capability, or fall back to
+// an unordered behavior if none is found.
+//
+// NOTE: to allow types that are [easyjson.Unmarshaler] s to use that route to process JSON,
+// you now need to register the adapter for easyjson at runtime.
+func ReadJSON(data []byte, value any) error {
+	trimmedData := bytes.Trim(data, "\x00")
+
+	if orderedMap, isOrdered := value.(ifaces.SetOrdered); isOrdered {
+		// if the value is an ordered map, favors support for OrderedUnmarshal.
+
+		orderedUnmarshaler := adapters.OrderedUnmarshalAdapterFor(orderedMap)
+
+		if orderedUnmarshaler != nil {
+			defer orderedUnmarshaler.Redeem()
+
+			return orderedUnmarshaler.OrderedUnmarshal(trimmedData, orderedMap)
+		}
+
+		// no support found in registered adapters, fall back to the default (unordered) case
+	}
+
+	unmarshaler := adapters.UnmarshalAdapterFor(value)
+	if unmarshaler != nil {
+		defer unmarshaler.Redeem()
+
+		return unmarshaler.Unmarshal(trimmedData, value)
+	}
+
+	// no support found in registered adapters, fall back to the default standard library.
+	//
+	// This only happens when tinkering with the global registry of adapters, since the default handles all the above cases.
+	return json.Unmarshal(trimmedData, value) // Codecov ignore // this is a safeguard not easily simulated in tests
+}
+
+// FromDynamicJSON turns a go value into a properly JSON typed structure.
+//
+// "Dynamic JSON" refers to what you get when unmarshaling JSON into an untyped any,
+// i.e. objects are represented by map[string]any, arrays by []any, and
+// all numbers are represented as float64.
+//
+// NOTE: target must be a pointer.
+//
+// # Maintaining the order of keys in objects
+//
+// If source and target implement [ifaces.Ordered] and [ifaces.SetOrdered] respectively,
+// they are considered "ordered maps" and the order of keys is maintained in the
+// "jsonification" process. In that case, map[string]any values are replaced by (ordered) [JSONMapSlice] ones.
+func FromDynamicJSON(source, target any) error {
+	b, err := WriteJSON(source)
+	if err != nil {
+		return err
+	}
+
+	return ReadJSON(b, target)
+}
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/ordered_map.go b/vendor/github.com/go-openapi/swag/jsonutils/ordered_map.go
new file mode 100644
index 00000000000..38dd3e24442
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/jsonutils/ordered_map.go
@@ -0,0 +1,114 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package jsonutils
+
+import (
+	"iter"
+
+	"github.com/go-openapi/swag/jsonutils/adapters"
+	"github.com/go-openapi/swag/typeutils"
+)
+
+// JSONMapSlice represents a JSON object, with the order of keys maintained.
+//
+// It behaves like an ordered map, but keys can't be accessed in constant time.
+type JSONMapSlice []JSONMapItem
+
+// OrderedItems iterates over all (key,value) pairs with the order of keys maintained.
+//
+// This implements the [ifaces.Ordered] interface, so that [ifaces.Adapter] s know how to marshal
+// keys in the desired order.
+func (s JSONMapSlice) OrderedItems() iter.Seq2[string, any] {
+	return func(yield func(string, any) bool) {
+		for _, item := range s {
+			if !yield(item.Key, item.Value) {
+				return
+			}
+		}
+	}
+}
+
+// SetOrderedItems sets keys in the [JSONMapSlice] object, as presented by
+// the provided iterator.
+//
+// As a special case, if items is nil, this sets the receiver to a nil slice.
+//
+// This implements the [ifaces.SetOrdered] interface, so that [ifaces.Adapter] s know how to unmarshal
+// keys in the desired order.
+func (s *JSONMapSlice) SetOrderedItems(items iter.Seq2[string, any]) {
+	if items == nil {
+		// force receiver to be a nil slice
+		*s = nil
+
+		return
+	}
+
+	m := *s
+	if len(m) > 0 {
+		// update mode: short-circuited when unmarshaling fresh data structures
+		idx := make(map[string]int, len(m))
+
+		for i, item := range m {
+			idx[item.Key] = i
+		}
+
+		for k, v := range items {
+			idx, ok := idx[k]
+			if ok {
+				m[idx].Value = v
+
+				continue
+			}
+
+			m = append(m, JSONMapItem{Key: k, Value: v})
+		}
+
+		*s = m
+
+		return
+	}
+
+	for k, v := range items {
+		m = append(m, JSONMapItem{Key: k, Value: v})
+	}
+
+	*s = m
+}
+
+// MarshalJSON renders a [JSONMapSlice] as JSON bytes, preserving the order of keys.
+//
+// It will pick the JSON library currently configured by the [adapters.Registry] (defaults to the standard library).
+func (s JSONMapSlice) MarshalJSON() ([]byte, error) {
+	orderedMarshaler := adapters.OrderedMarshalAdapterFor(s)
+	defer orderedMarshaler.Redeem()
+
+	return orderedMarshaler.OrderedMarshal(s)
+}
+
+// UnmarshalJSON builds a [JSONMapSlice] from JSON bytes, preserving the order of keys.
+// +// Inner objects are unmarshaled as ordered [JSONMapSlice] slices and not map[string]any. +// +// It will pick the JSON library currently configured by the [adapters.Registry] (defaults to the standard library). +func (s *JSONMapSlice) UnmarshalJSON(data []byte) error { + if typeutils.IsNil(*s) { + // allow unmarshaling with a simple var declaration (nil slice) + *s = JSONMapSlice{} + } + + orderedUnmarshaler := adapters.OrderedUnmarshalAdapterFor(s) + defer orderedUnmarshaler.Redeem() + + return orderedUnmarshaler.OrderedUnmarshal(data, s) +} + +// JSONMapItem represents the value of a key in a JSON object held by [JSONMapSlice]. +// +// Notice that JSONMapItem should not be marshaled to or unmarshaled from JSON directly. +// +// Use this type as part of a [JSONMapSlice] when dealing with JSON bytes. +type JSONMapItem struct { + Key string + Value any +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils_iface.go b/vendor/github.com/go-openapi/swag/jsonutils_iface.go new file mode 100644 index 00000000000..7bd4105fa51 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils_iface.go @@ -0,0 +1,65 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import ( + "log" + + "github.com/go-openapi/swag/jsonutils" +) + +// JSONMapSlice represents a JSON object, with the order of keys maintained +// +// Deprecated: use [jsonutils.JSONMapSlice] instead, or [yamlutils.YAMLMapSlice] if you marshal YAML. +type JSONMapSlice = jsonutils.JSONMapSlice + +// JSONMapItem represents an entry in a JSON object, with the order of keys maintained +// +// Deprecated: use [jsonutils.JSONMapItem] instead. +type JSONMapItem = jsonutils.JSONMapItem + +// WriteJSON writes json data. +// +// Deprecated: use [jsonutils.WriteJSON] instead. +func WriteJSON(data any) ([]byte, error) { return jsonutils.WriteJSON(data) } + +// ReadJSON reads json data. +// +// Deprecated: use [jsonutils.ReadJSON] instead. +func ReadJSON(data []byte, value any) error { return jsonutils.ReadJSON(data, value) } + +// DynamicJSONToStruct converts an untyped JSON structure into a target data type. +// +// Deprecated: use [jsonutils.FromDynamicJSON] instead. +func DynamicJSONToStruct(data any, target any) error { + return jsonutils.FromDynamicJSON(data, target) +} + +// ConcatJSON concatenates multiple JSON objects efficiently. +// +// Deprecated: use [jsonutils.ConcatJSON] instead. +func ConcatJSON(blobs ...[]byte) []byte { return jsonutils.ConcatJSON(blobs...) } + +// ToDynamicJSON turns a go value into a properly JSON untyped structure. +// +// It is the same as [FromDynamicJSON], but doesn't check for errors. +// +// Deprecated: this function is a misnomer and is unsafe. Use [jsonutils.FromDynamicJSON] instead. +func ToDynamicJSON(value any) any { + var res any + if err := FromDynamicJSON(value, &res); err != nil { + log.Println(err) + } + + return res +} + +// FromDynamicJSON turns a go value into a properly JSON typed structure. +// +// "Dynamic JSON" refers to what you get when unmarshaling JSON into an untyped any, +// i.e. objects are represented by map[string]any, arrays by []any, and all +// numbers are represented as float64. + +// +// Deprecated: use [jsonutils.FromDynamicJSON] instead.
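+// +// A minimal usage sketch (hypothetical types and values): +// +//	src := map[string]any{"name": "rex", "age": float64(3)} +//	var pet struct { +//		Name string `json:"name"` +//		Age  int    `json:"age"` +//	} +//	if err := FromDynamicJSON(src, &pet); err != nil { +//		// handle the conversion error +//	}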
+func FromDynamicJSON(data, target any) error { return jsonutils.FromDynamicJSON(data, target) } diff --git a/vendor/github.com/go-openapi/swag/loading/LICENSE b/vendor/github.com/go-openapi/swag/loading/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/loading/doc.go b/vendor/github.com/go-openapi/swag/loading/doc.go new file mode 100644 index 00000000000..8cf7bcb8b9d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package loading provides tools to load a file from http or from a local file system. +package loading diff --git a/vendor/github.com/go-openapi/swag/loading/errors.go b/vendor/github.com/go-openapi/swag/loading/errors.go new file mode 100644 index 00000000000..b3964289c74 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/errors.go @@ -0,0 +1,15 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package loading + +type loadingError string + +const ( + // ErrLoader is an error raised by the file loader utility + ErrLoader loadingError = "loader error" +) + +func (e loadingError) Error() string { + return string(e) +} diff --git a/vendor/github.com/go-openapi/swag/loading/json.go b/vendor/github.com/go-openapi/swag/loading/json.go new file mode 100644 index 00000000000..59db12f5cfd --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/json.go @@ -0,0 +1,25 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package loading + +import ( + "encoding/json" + "errors" + "path/filepath" +) + +// JSONMatcher matches json for a file loader. +func JSONMatcher(path string) bool { + ext := filepath.Ext(path) + return ext == ".json" || ext == ".jsn" || ext == ".jso" +} + +// JSONDoc loads a json document from either a file or a remote url. +func JSONDoc(path string, opts ...Option) (json.RawMessage, error) { + data, err := LoadFromFileOrHTTP(path, opts...) 
+ if err != nil { + return nil, errors.Join(err, ErrLoader) + } + return json.RawMessage(data), nil +} diff --git a/vendor/github.com/go-openapi/swag/loading/loading.go b/vendor/github.com/go-openapi/swag/loading/loading.go new file mode 100644 index 00000000000..269fb74d167 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/loading.go @@ -0,0 +1,160 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package loading + +import ( + "context" + "embed" + "fmt" + "io" + "log" + "net/http" + "net/url" + "path" + "path/filepath" + "runtime" + "strings" +) + +// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in. +func LoadFromFileOrHTTP(pth string, opts ...Option) ([]byte, error) { + o := optionsWithDefaults(opts) + return LoadStrategy(pth, o.ReadFileFunc(), loadHTTPBytes(opts...), opts...)(pth) +} + +// LoadStrategy returns a loader function for a given path or URI. +// +// The load strategy returns the remote loader for any path starting with `http`. +// So this works for any URI with a scheme `http` or `https`. +// +// The fallback strategy is to call the local loader. +// +// The local loader takes a local file system path (absolute or relative) as argument, +// or alternatively a `file://...` URI, **without host** (see also below for windows). +// +// There are a few liberties taken, initially intended to be tolerant regarding the URI syntax, +// especially on windows. +// +// Before the local loader is called, the given path is transformed: +// - percent-encoded characters are unescaped +// - simple paths (e.g. `./folder/file`) are passed as-is +// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such as `folder/file` works too. +// +// For paths provided as URIs with the "file" scheme, please note that: +// - `file://` is simply stripped. +// This means that the host part of the URI is not parsed at all. +// For example, `file:///folder/file` becomes `/folder/file`, +// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems. +// Similarly, `file://./folder/file` yields `./folder/file`. +// - on windows, `file://...` can take a host so as to specify a UNC share location. +// +// Reminder about windows-specifics: +// - `file://host/folder/file` becomes a UNC path like `\\host\folder\file` (no port specification is supported) +// - `file:///c:/folder/file` becomes `C:\folder\file` +// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file` +func LoadStrategy(pth string, local, remote func(string) ([]byte, error), opts ...Option) func(string) ([]byte, error) { + if strings.HasPrefix(pth, "http") { + return remote + } + o := optionsWithDefaults(opts) + _, isEmbedFS := o.fs.(embed.FS) + + return func(p string) ([]byte, error) { + upth, err := url.PathUnescape(p) + if err != nil { + return nil, err + } + + cpth, hasPrefix := strings.CutPrefix(upth, "file://") + if !hasPrefix || isEmbedFS || runtime.GOOS != "windows" { + // crude processing: trim the file:// prefix. This leaves full URIs with a host with a (mostly) unexpected result + // regular file path provided: just normalize slashes + if isEmbedFS { + // on windows, we need to slash the path if FS is an embed FS. + return local(strings.TrimLeft(filepath.ToSlash(cpth), "./")) // remove invalid leading characters for embed FS + } + + return local(filepath.FromSlash(cpth)) + } + + // windows-only pre-processing of file://...
URIs, excluding embed.FS + + // support for canonical file URIs on windows. + u, err := url.Parse(filepath.ToSlash(upth)) + if err != nil { + return nil, err + } + + if u.Host != "" { + // assume UNC name (volume share) + // NOTE: UNC port not yet supported + + // when the "host" segment is a drive letter: + // file://C:/folder/... => C:\folder + upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`)) + if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' { + // tolerance: if we have a leading dot, this can't be a host + // file://host/share/folder\... ==> \\host\share\path\folder + upth = "//" + upth + } + } else { + // no host, let's figure out if this is a drive letter + upth = strings.TrimPrefix(upth, `file://`) + first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/") + if strings.HasSuffix(first, ":") { + // drive letter in the first segment: + // file:///c:/folder/... ==> strip the leading slash + upth = strings.TrimPrefix(upth, `/`) + } + } + + return local(filepath.FromSlash(upth)) + } +} + +func loadHTTPBytes(opts ...Option) func(path string) ([]byte, error) { + o := optionsWithDefaults(opts) + + return func(path string) ([]byte, error) { + client := o.client + timeoutCtx := context.Background() + var cancel func() + + if o.httpTimeout > 0 { + timeoutCtx, cancel = context.WithTimeout(timeoutCtx, o.httpTimeout) + defer cancel() + } + + req, err := http.NewRequestWithContext(timeoutCtx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + if o.basicAuthUsername != "" && o.basicAuthPassword != "" { + req.SetBasicAuth(o.basicAuthUsername, o.basicAuthPassword) + } + + for key, val := range o.customHeaders { + req.Header.Set(key, val) + } + + resp, err := client.Do(req) + defer func() { + if resp != nil { + if e := resp.Body.Close(); e != nil { + log.Println(e) + } + } + }() + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("could not access document at %q [%s]: %w", path, resp.Status, ErrLoader) + } + + return io.ReadAll(resp.Body) + } +} diff --git a/vendor/github.com/go-openapi/swag/loading/options.go b/vendor/github.com/go-openapi/swag/loading/options.go new file mode 100644 index 00000000000..6674ac69e62 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/options.go @@ -0,0 +1,125 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package loading + +import ( + "io/fs" + "net/http" + "os" + "time" +) + +type ( + // Option provides options for loading a file over HTTP or from a file. + Option func(*options) + + httpOptions struct { + httpTimeout time.Duration + basicAuthUsername string + basicAuthPassword string + customHeaders map[string]string + client *http.Client + } + + fileOptions struct { + fs fs.ReadFileFS + } + + options struct { + httpOptions + fileOptions + } +) + +func (fo fileOptions) ReadFileFunc() func(string) ([]byte, error) { + if fo.fs == nil { + return os.ReadFile + } + + return fo.fs.ReadFile +} + +// WithTimeout sets a timeout for the remote file loader. +// +// The default timeout is 30s. +func WithTimeout(timeout time.Duration) Option { + return func(o *options) { + o.httpTimeout = timeout + } +} + +// WithBasicAuth sets a basic authentication scheme for the remote file loader. 
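+// +// A usage sketch (hypothetical URL and credentials): +// +//	raw, err := LoadFromFileOrHTTP("https://spec.example.com/openapi.json", +//		WithBasicAuth("user", "secret"), +//		WithTimeout(10*time.Second), +//	) +// +// The same options apply to the other loaders in this package.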
+func WithBasicAuth(username, password string) Option { + return func(o *options) { + o.basicAuthUsername = username + o.basicAuthPassword = password + } +} + +// WithCustomHeaders sets custom headers for the remote file loader. +func WithCustomHeaders(headers map[string]string) Option { + return func(o *options) { + if o.customHeaders == nil { + o.customHeaders = make(map[string]string, len(headers)) + } + + for header, value := range headers { + o.customHeaders[header] = value + } + } +} + +// WithHTTPClient overrides the default HTTP client used to fetch a remote file. +// +// By default, [http.DefaultClient] is used. +func WithHTTPClient(client *http.Client) Option { + return func(o *options) { + o.client = client + } +} + +// WithFS sets a file system for the local file loader. +// +// If the provided file system is a [fs.ReadFileFS], the ReadFile function is used. +// Otherwise, ReadFile is wrapped using [fs.ReadFile]. +// +// By default, the file system is the one provided by the os package. +// +// For example, this may be set to consume from an embedded file system, or a rooted FS. +func WithFS(filesystem fs.FS) Option { + return func(o *options) { + if rfs, ok := filesystem.(fs.ReadFileFS); ok { + o.fs = rfs + + return + } + o.fs = readFileFS{FS: filesystem} + } +} + +type readFileFS struct { + fs.FS +} + +func (r readFileFS) ReadFile(name string) ([]byte, error) { + return fs.ReadFile(r.FS, name) +} + +func optionsWithDefaults(opts []Option) options { + const defaultTimeout = 30 * time.Second + + o := options{ + // package level defaults + httpOptions: httpOptions{ + httpTimeout: defaultTimeout, + client: http.DefaultClient, + }, + } + + for _, apply := range opts { + apply(&o) + } + + return o +} diff --git a/vendor/github.com/go-openapi/swag/loading/yaml.go b/vendor/github.com/go-openapi/swag/loading/yaml.go new file mode 100644 index 00000000000..3ebb53668c4 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading/yaml.go @@ -0,0 +1,37 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package loading + +import ( + "encoding/json" + "path/filepath" + + "github.com/go-openapi/swag/yamlutils" +) + +// YAMLMatcher matches yaml for a file loader. +func YAMLMatcher(path string) bool { + ext := filepath.Ext(path) + return ext == ".yaml" || ext == ".yml" +} + +// YAMLDoc loads a yaml document from either http or a file and converts it to json. +func YAMLDoc(path string, opts ...Option) (json.RawMessage, error) { + yamlDoc, err := YAMLData(path, opts...) + if err != nil { + return nil, err + } + + return yamlutils.YAMLToJSON(yamlDoc) +} + +// YAMLData loads a yaml document from either http or a file. +func YAMLData(path string, opts ...Option) (any, error) { + data, err := LoadFromFileOrHTTP(path, opts...) + if err != nil { + return nil, err + } + + return yamlutils.BytesToYAMLDoc(data) +} diff --git a/vendor/github.com/go-openapi/swag/loading_iface.go b/vendor/github.com/go-openapi/swag/loading_iface.go new file mode 100644 index 00000000000..27ec3fb8c37 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading_iface.go @@ -0,0 +1,91 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import ( + "encoding/json" + "time" + + "github.com/go-openapi/swag/loading" +) + +var ( + // Package-level defaults for the file loading utilities (deprecated). + + // LoadHTTPTimeout the default timeout for load requests. 
+ // + // Deprecated: use [loading.WithTimeout] instead. + LoadHTTPTimeout = 30 * time.Second + + // LoadHTTPBasicAuthUsername the username to use when load requests require basic auth. + // + // Deprecated: use [loading.WithBasicAuth] instead. + LoadHTTPBasicAuthUsername = "" + + // LoadHTTPBasicAuthPassword the password to use when load requests require basic auth. + // + // Deprecated: use [loading.WithBasicAuth] instead. + LoadHTTPBasicAuthPassword = "" + + // LoadHTTPCustomHeaders an optional collection of custom HTTP headers for load requests. + // + // Deprecated: use [loading.WithCustomHeaders] instead. + LoadHTTPCustomHeaders = map[string]string{} +) + +// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the provided path. +// +// Deprecated: use [loading.LoadFromFileOrHTTP] instead. +func LoadFromFileOrHTTP(pth string, opts ...loading.Option) ([]byte, error) { + return loading.LoadFromFileOrHTTP(pth, loadingOptionsWithDefaults(opts)...) +} + +// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in. +// The timeout arg allows for per-request overriding of the request timeout. +// +// Deprecated: use [loading.LoadFromFileOrHTTP] with the [loading.WithTimeout] option instead. +func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration, opts ...loading.Option) ([]byte, error) { + opts = append(opts, loading.WithTimeout(timeout)) + + return LoadFromFileOrHTTP(pth, opts...) +} + +// LoadStrategy returns a loader function for a given path or URL. +// +// Deprecated: use [loading.LoadStrategy] instead. +func LoadStrategy(pth string, local, remote func(string) ([]byte, error), opts ...loading.Option) func(string) ([]byte, error) { + return loading.LoadStrategy(pth, local, remote, loadingOptionsWithDefaults(opts)...) +} + +// YAMLMatcher matches yaml for a file loader. +// +// Deprecated: use [loading.YAMLMatcher] instead. +func YAMLMatcher(path string) bool { return loading.YAMLMatcher(path) } + +// YAMLDoc loads a yaml document from either http or a file and converts it to json. +// +// Deprecated: use [loading.YAMLDoc] instead. +func YAMLDoc(path string) (json.RawMessage, error) { + return loading.YAMLDoc(path) +} + +// YAMLData loads a yaml document from either http or a file. +// +// Deprecated: use [loading.YAMLData] instead. +func YAMLData(path string) (any, error) { + return loading.YAMLData(path) +} + +// loadingOptionsWithDefaults bridges deprecated default settings that use package-level variables, +// with the recommended use of loading.Option. +func loadingOptionsWithDefaults(opts []loading.Option) []loading.Option { + o := []loading.Option{ + loading.WithTimeout(LoadHTTPTimeout), + loading.WithBasicAuth(LoadHTTPBasicAuthUsername, LoadHTTPBasicAuthPassword), + loading.WithCustomHeaders(LoadHTTPCustomHeaders), + } + o = append(o, opts...)
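+ // options supplied by the caller are applied after the legacy package-level defaults, so they take precedence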
+ + return o +} diff --git a/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md b/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md new file mode 100644 index 00000000000..6674c63b729 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md @@ -0,0 +1,90 @@ +# Benchmarking name mangling utilities + +```bash +go test -bench XXX -run XXX -benchtime 30s +``` + +## Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op +BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op +BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op +``` + +## Benchmarks after PR #79 + +~ ×10 performance improvement and ~ ×100 fewer memory allocations. + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op +``` + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op +``` + +## Benchmarks at d7d2d1b895f5b6747afaff312dd2a402e69e818b + +go1.24 + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 19757858 1881 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 17494111 2094 ns/op 74 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 28161226 1492 ns/op 158 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 23787333 1489 ns/op 158 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 17537257 2030 ns/op 103 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 16977453 2156 ns/op 105 B/op 6 allocs/op +``` + +## Benchmarks after PR #106 + +Moving the scope of everything down to a struct allowed us to reduce garbage and pooling a bit. + +On top of that, ToGoName (and thus ToVarName) have been subject to a minor optimization, removing a few allocations. + +Overall timings improve by ~ 10%.
+ +go1.24 + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag/mangling +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 22496130 1618 ns/op 31 B/op 3 allocs/op +BenchmarkToXXXName/ToVarName-16 22538068 1618 ns/op 33 B/op 3 allocs/op +BenchmarkToXXXName/ToFileName-16 27722977 1236 ns/op 105 B/op 6 allocs/op +BenchmarkToXXXName/ToCommandName-16 27967395 1258 ns/op 105 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18587901 1917 ns/op 103 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17193208 2019 ns/op 108 B/op 7 allocs/op +``` diff --git a/vendor/github.com/go-openapi/swag/mangling/LICENSE b/vendor/github.com/go-openapi/swag/mangling/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/mangling/doc.go b/vendor/github.com/go-openapi/swag/mangling/doc.go new file mode 100644 index 00000000000..ce0d8904857 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/doc.go @@ -0,0 +1,25 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package mangling provides name mangling capabilities. +// +// Name mangling is an important stage when generating code: +// it helps construct safe program identifiers that abide by the language rules +// and play along with linters. +// +// Examples: +// +// Suppose we get an object name taken from an API spec: "json_object", +// +// We may generate a legit go type name using [NameMangler.ToGoName]: "JsonObject". +// +// We may then locate this type in a source file named using [NameMangler.ToFileName]: "json_object.go". +// +// The methods exposed by the NameMangler are used to generate code in many different contexts, such as: +// +// - generating exported or unexported go identifiers from a JSON schema or an API spec +// - generating file names +// - generating human-readable comments for types and variables +// - generating JSON-like API identifiers from go code +// - ... +package mangling diff --git a/vendor/github.com/go-openapi/swag/mangling/initialism_index.go b/vendor/github.com/go-openapi/swag/mangling/initialism_index.go new file mode 100644 index 00000000000..e5b70c14938 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/initialism_index.go @@ -0,0 +1,270 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package mangling + +import ( + "sort" + "strings" + "unicode" + "unicode/utf8" +) + +// DefaultInitialisms returns all the initialisms configured by default for this package. +// +// # Motivation +// +// Common initialisms are acronyms for which the ordinary camel-casing rules are altered and +// for which we retain the original case. +// +// This is largely specific to the go naming conventions enforced by golint (now revive). +// +// # Example +// +// In go, "id" is a good-looking identifier, but "Id" is not and "ID" is preferred +// (notice that this stems only from conventions: the go compiler accepts all of these). +// +// Similarly, we may use "http", but not "Http". 
In this case, "HTTP" is preferred. +// +// # Reference and customization +// +// The default list of these casing-style exceptions is taken from the [github.com/mgechev/revive] linter for go: +// https://github.com/mgechev/revive/blob/master/lint/name.go#L93 +// +// There are a few additions to the original list, such as IPv4, IPv6 and OAI ("OpenAPI"). +// +// For these additions, "IPv4" would be preferred to "Ipv4" or "IPV4", and "OAI" to "Oai". +// +// You may redefine this list entirely using the mangler option [WithInitialisms], or simply add extra definitions +// using [WithAdditionalInitialisms]. +// +// # Mixed-case and plurals +// +// Notice that initialisms are not necessarily fully upper-cased: a mixed-case initialism indicates the preferred casing. +// +// Obviously, lower-case only initialisms do not make a lot of sense: if lower-case only initialisms are added, +// they will be considered fully capitalized. +// +// Plural forms use mixed case like "IDs". And so do values like "IPv4" or "IPv6". +// +// The [NameMangler] automatically detects simple plurals for words such as "IDs" or "APIs", +// so you don't need to configure these variants. +// +// At this moment, it doesn't support pluralization of terms that end with an 's' (or 'S'), since there is +// no clear consensus on whether a word like DNS should be pluralized as DNSes or remain invariant. +// The [NameMangler] considers those invariant. Therefore DNSs or DNSes are not recognized as plurals for DNS. +// +// Besides, we don't want to support pluralization of terms which would otherwise conflict with another one, +// like "HTTPs" vs "HTTPS". All these should be considered invariant. Hence: "Https" matches "HTTPS" and +// "HTTPSS" is "HTTPS" followed by "S". +func DefaultInitialisms() []string { + return []string{ + "ACL", + "API", + "ASCII", + "CPU", + "CSS", + "DNS", + "EOF", + "GUID", + "HTML", + "HTTPS", + "HTTP", + "ID", + "IP", + "IPv4", // prefer the mixed case outcome IPv4 over the capitalized IPV4 + "IPv6", // prefer the mixed case outcome IPv6 over the capitalized IPV6 + "JSON", + "LHS", + "OAI", + "QPS", + "RAM", + "RHS", + "RPC", + "SLA", + "SMTP", + "SQL", + "SSH", + "TCP", + "TLS", + "TTL", + "UDP", + "UI", + "UID", + "UUID", + "URI", + "URL", + "UTF8", + "VM", + "XML", + "XMPP", + "XSRF", + "XSS", + } +} + +type indexOfInitialisms struct { + initialismsCache + + index map[string]struct{} +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + index: make(map[string]struct{}), + } +} + +func (m *indexOfInitialisms) add(words ...string) *indexOfInitialisms { + for _, word := range words { + // sanitization of injected words: trimmed from blanks, and must start with a letter + trimmed := strings.TrimSpace(word) + + firstRune, _ := utf8.DecodeRuneInString(trimmed) + if !unicode.IsLetter(firstRune) { + continue + } + + // Initialisms are case-sensitive. This means that we support mixed-case words. + // However, if specified as a lower-case string, the initialism should be fully capitalized.
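+ // e.g. an added "acme" is indexed as "ACME", whereas a mixed-case word like "IPv4" is kept as-is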
+ if trimmed == strings.ToLower(trimmed) { + m.index[strings.ToUpper(trimmed)] = struct{}{} + + continue + } + + m.index[trimmed] = struct{}{} + } + return m +} + +func (m *indexOfInitialisms) sorted() []string { + result := make([]string, 0, len(m.index)) + for k := range m.index { + result = append(result, k) + } + sort.Sort(sort.Reverse(byInitialism(result))) + return result +} + +func (m *indexOfInitialisms) buildCache() { + m.build(m.sorted(), m.pluralForm) +} + +// initialismsCache caches all needed pre-computed and converted initialism entries, +// in the desired resolution order. +type initialismsCache struct { + initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version + initialismsPluralForm []pluralForm +} + +func (c *initialismsCache) build(in []string, pluralfunc func(string) pluralForm) { + c.initialisms = in + c.initialismsRunes = asRunes(c.initialisms) + c.initialismsUpperCased = asUpperCased(c.initialisms) + c.initialismsPluralForm = asPluralForms(c.initialisms, pluralfunc) +} + +// pluralForm denotes the kind of pluralization to be used for initialisms. +// +// At this moment, initialisms are either invariant or follow a simple plural form with an +// extra (lower case) "s". +type pluralForm uint8 + +const ( + notPlural pluralForm = iota + invariantPlural + simplePlural +) + +func (f pluralForm) String() string { + switch f { + case notPlural: + return "notPlural" + case invariantPlural: + return "invariantPlural" + case simplePlural: + return "simplePlural" + default: + return "" + } +} + +// pluralForm indicates how we want to pluralize a given initialism. +// +// Besides configured invariant forms (like HTTP and HTTPS), +// an initialism is normally pluralized by adding a single 's', like in IDs. +// +// Initialisms ending with an 'S' or an 's' are configured as invariant (we don't +// support plural forms like CSSes or DNSes, however the mechanism could be extended to +// do just that). +func (m *indexOfInitialisms) pluralForm(key string) pluralForm { + if _, ok := m.index[key]; !ok { + return notPlural + } + + if strings.HasSuffix(strings.ToUpper(key), "S") { + return invariantPlural + } + + if _, ok := m.index[key+"s"]; ok { + return invariantPlural + } + + if _, ok := m.index[key+"S"]; ok { + return invariantPlural + } + + return simplePlural +} + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less specifies the order in which initialisms are prioritized: +// 1. match longest first +// 2. when equal length, match in reverse lexicographical order, lower case match comes first +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return s[i] < s[j] +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +// asPluralForms bakes an index of pluralization support. 
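+// +// For example, "ID" admits the simple plural "IDs", while initialisms ending in 'S', such as "DNS" or "HTTPS", remain invariant.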
+func asPluralForms(in []string, pluralFunc func(string) pluralForm) []pluralForm { + out := make([]pluralForm, len(in)) + for i, initialism := range in { + out[i] = pluralFunc(initialism) + } + + return out +} diff --git a/vendor/github.com/go-openapi/swag/mangling/name_lexem.go b/vendor/github.com/go-openapi/swag/mangling/name_lexem.go new file mode 100644 index 00000000000..bc837e3b9f5 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/name_lexem.go @@ -0,0 +1,186 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package mangling + +import ( + "bytes" + "strings" + "unicode" + "unicode/utf8" +) + +type ( + lexemKind uint8 + + nameLexem struct { + original string + matchedInitialism string + kind lexemKind + } +) + +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName +) + +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, + original: original, + matchedInitialism: matchedInitialism, + } +} + +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, + original: trim(original), // TODO: save on calls to trim + } +} + +// WriteTitleized writes the titleized lexeme to a bytes.Buffer. +// +// If the first letter cannot be capitalized, it doesn't write anything and returns false, +// so the caller may attempt some workaround strategy. +func (l nameLexem) WriteTitleized(w *bytes.Buffer, alwaysUpper bool) bool { + if l.kind == lexemKindInitialismName { + w.WriteString(l.matchedInitialism) + + return true + } + + if len(l.original) == 0 { + return true + } + + if len(l.original) == 1 { + // identifier is too short: casing will depend on the context + firstByte := l.original[0] + switch { + case 'A' <= firstByte && firstByte <= 'Z': + // safe + w.WriteByte(firstByte) + + return true + case alwaysUpper && 'a' <= firstByte && firstByte <= 'z': + w.WriteByte(firstByte - 'a' + 'A') + + return true + default: + + // not a letter: skip and let the caller decide + return false + } + } + + if firstByte := l.original[0]; firstByte < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= firstByte && firstByte <= 'Z': + // already an upper case letter + w.WriteString(l.original) + + return true + case 'a' <= firstByte && firstByte <= 'z': + w.WriteByte(firstByte - 'a' + 'A') + w.WriteString(l.original[1:]) + + return true + default: + // not a good candidate: doesn't start with a letter + return false + } + } + + // unicode + firstRune, idx := utf8.DecodeRuneInString(l.original) + if !unicode.IsLetter(firstRune) || !unicode.IsUpper(unicode.ToUpper(firstRune)) { + // not a good candidate: doesn't start with a letter + // or a rune for which case doesn't make sense (e.g. East-Asian runes etc) + return false + } + + rest := l.original[idx:] + w.WriteRune(unicode.ToUpper(firstRune)) + w.WriteString(strings.ToLower(rest)) + + return true +} + +// WriteLower is like WriteTitleized, but writes a lower-case version of the lexeme. +// +// Similarly, nothing is written if the casing of the first rune doesn't make sense.
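+// +// For instance, an initialism lexeme matched as "HTTP" is written as "http".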
+func (l nameLexem) WriteLower(w *bytes.Buffer, alwaysLower bool) bool { + if l.kind == lexemKindInitialismName { + w.WriteString(lower(l.matchedInitialism)) + + return true + } + + if len(l.original) == 0 { + return true + } + + if len(l.original) == 1 { + // identifier is too short: casing will depend on the context + firstByte := l.original[0] + switch { + case 'a' <= firstByte && firstByte <= 'z': + // safe + w.WriteByte(firstByte) + + return true + case alwaysLower && 'A' <= firstByte && firstByte <= 'Z': + w.WriteByte(firstByte - 'A' + 'a') + + return true + default: + + // not a letter: skip and let the caller decide + return false + } + } + + if firstByte := l.original[0]; firstByte < utf8.RuneSelf { + // ASCII + switch { + case 'a' <= firstByte && firstByte <= 'z': + // already a lower case letter + w.WriteString(l.original) + + return true + case 'A' <= firstByte && firstByte <= 'Z': + w.WriteByte(firstByte - 'A' + 'a') + w.WriteString(l.original[1:]) + + return true + default: + // not a good candidate: doesn't start with a letter + return false + } + } + + // unicode + firstRune, idx := utf8.DecodeRuneInString(l.original) + if !unicode.IsLetter(firstRune) || !unicode.IsLower(unicode.ToLower(firstRune)) { + // not a good candidate: doesn't start with a letter + // or a rune for which case doesn't make sense (e.g. East-Asian runes etc) + return false + } + + rest := l.original[idx:] + w.WriteRune(unicode.ToLower(firstRune)) + w.WriteString(rest) + + return true +} + +func (l nameLexem) GetOriginal() string { + return l.original +} + +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName +} diff --git a/vendor/github.com/go-openapi/swag/mangling/name_mangler.go b/vendor/github.com/go-openapi/swag/mangling/name_mangler.go new file mode 100644 index 00000000000..da685681d08 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/mangling/name_mangler.go @@ -0,0 +1,370 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package mangling + +import ( + "strings" + "unicode" +) + +// NameMangler knows how to transform sentences or words into +// identifiers that are a better fit in contexts such as: +// +// - unexported or exported go variable identifiers +// - file names +// - camel cased identifiers +// - ... +// +// The [NameMangler] is safe for concurrent use, save for its [NameMangler.AddInitialisms] method, +// which is not. +// +// # Known limitations +// +// At this moment, the [NameMangler] doesn't play well with "all caps" text: +// +// unless every single upper-cased word is declared as an initialism, capitalized words would generally +// not be transformed with the expected result, e.g. +// +// ToFileName("THIS_IS_ALL_CAPS") +// +// yields the weird outcome +// +// "t_h_i_s_i_s_a_l_l_c_a_p_s" +type NameMangler struct { + options + + index *indexOfInitialisms + + splitter splitter + splitterWithPostSplit splitter + + _ struct{} +} + +// NewNameMangler builds a name mangler ready to convert strings. +// +// The default name mangler is configured with default common initialisms and all default options. +func NewNameMangler(opts ...Option) NameMangler { + m := NameMangler{ + options: optionsWithDefaults(opts), + index: newIndexOfInitialisms(), + } + m.addInitialisms(m.commonInitialisms...) + + // a splitter that returns matched lexemes as ready-to-assemble strings: + // details of the lexemes are redeemed.
+	m.splitter = newSplitter(
+		withInitialismsCache(&m.index.initialismsCache),
+		withReplaceFunc(m.replaceFunc),
+	)
+
+	// a splitter that returns matched lexemes ready for post-processing
+	m.splitterWithPostSplit = newSplitter(
+		withInitialismsCache(&m.index.initialismsCache),
+		withReplaceFunc(m.replaceFunc),
+		withPostSplitInitialismCheck,
+	)
+
+	return m
+}
+
+// AddInitialisms declares extra initialisms to the mangler.
+//
+// It declares extra words as "initialisms" (i.e. words that won't be camel cased or title cased),
+// on top of the existing list of common initialisms (such as ID, HTTP...).
+//
+// Added words must start with a (unicode) letter. If some don't, they are ignored.
+// Added words are either fully capitalized or mixed-cased. Lower-case only words are considered capitalized.
+//
+// It is typically used just after initializing the [NameMangler].
+//
+// When all initialisms are known at the time the mangler is initialized, it is preferable to
+// use [NewNameMangler] with the option [WithAdditionalInitialisms].
+//
+// Adding initialisms mutates the mangler and should not be carried out concurrently with other calls to the mangler.
+func (m *NameMangler) AddInitialisms(words ...string) {
+	m.addInitialisms(words...)
+}
+
+// Initialisms renders the list of initialisms supported by this mangler.
+func (m *NameMangler) Initialisms() []string {
+	return m.index.initialisms
+}
+
+// Camelize a single word.
+//
+// Example:
+//
+// - "HELLO" and "hello" become "Hello".
+func (m NameMangler) Camelize(word string) string {
+	ru := []rune(word)
+
+	switch len(ru) {
+	case 0:
+		return ""
+	case 1:
+		return string(unicode.ToUpper(ru[0]))
+	default:
+		camelized := poolOfBuffers.BorrowBuffer(len(word))
+		camelized.Grow(len(word))
+		defer func() {
+			poolOfBuffers.RedeemBuffer(camelized)
+		}()
+
+		camelized.WriteRune(unicode.ToUpper(ru[0]))
+		for _, ru := range ru[1:] {
+			camelized.WriteRune(unicode.ToLower(ru))
+		}
+
+		return camelized.String()
+	}
+}
+
+// ToFileName generates a suitable snake-case file name from a sentence.
+//
+// It lower-cases everything with underscore (_) as a word separator.
+//
+// Examples:
+//
+// - "Hello, Swagger" becomes "hello_swagger"
+// - "HelloSwagger" becomes "hello_swagger"
+func (m NameMangler) ToFileName(name string) string {
+	inptr := m.split(name)
+	in := *inptr
+	out := make([]string, 0, len(in))
+
+	for _, w := range in {
+		out = append(out, lower(w))
+	}
+	poolOfStrings.RedeemStrings(inptr)
+
+	return strings.Join(out, "_")
+}
+
+// ToCommandName generates a suitable CLI command name from a sentence.
+//
+// It lower-cases everything with dash (-) as a word separator.
+//
+// Examples:
+//
+// - "Hello, Swagger" becomes "hello-swagger"
+// - "HelloSwagger" becomes "hello-swagger"
func (m NameMangler) ToCommandName(name string) string {
+	inptr := m.split(name)
+	in := *inptr
+	out := make([]string, 0, len(in))
+
+	for _, w := range in {
+		out = append(out, lower(w))
+	}
+	poolOfStrings.RedeemStrings(inptr)
+
+	return strings.Join(out, "-")
+}
+
+// ToHumanNameLower represents a code name as a human-readable series of words.
+//
+// It lower-cases everything with blank space as a word separator.
+//
+// NOTE: parts recognized as initialisms just keep their original casing.
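
A quick usage sketch for the converters above; the expected outputs are taken from the doc comments, with the import path as vendored here:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag/mangling"
)

func main() {
	m := mangling.NewNameMangler()

	fmt.Println(m.ToFileName("HelloSwagger"))      // hello_swagger
	fmt.Println(m.ToCommandName("Hello, Swagger")) // hello-swagger
	fmt.Println(m.Camelize("HELLO"))               // Hello
}
```
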
+//
+// Examples:
+//
+// - "Hello, Swagger" becomes "hello swagger"
+// - "HelloSwagger" or "Hello-Swagger" become "hello swagger"
+func (m NameMangler) ToHumanNameLower(name string) string {
+	s := m.splitterWithPostSplit
+	in := s.split(name)
+	out := make([]string, 0, len(*in))
+
+	for _, w := range *in {
+		if !w.IsInitialism() {
+			out = append(out, lower(w.GetOriginal()))
+		} else {
+			out = append(out, trim(w.GetOriginal()))
+		}
+	}
+
+	poolOfLexems.RedeemLexems(in)
+
+	return strings.Join(out, " ")
+}
+
+// ToHumanNameTitle represents a code name as a human-readable series of titleized words.
+//
+// It titleizes every word with blank space as a word separator.
+//
+// Examples:
+//
+// - "hello, Swagger" becomes "Hello Swagger"
+// - "helloSwagger" becomes "Hello Swagger"
+func (m NameMangler) ToHumanNameTitle(name string) string {
+	s := m.splitterWithPostSplit
+	in := s.split(name)
+
+	out := make([]string, 0, len(*in))
+	for _, w := range *in {
+		original := trim(w.GetOriginal())
+		if !w.IsInitialism() {
+			out = append(out, m.Camelize(original))
+		} else {
+			out = append(out, original)
+		}
+	}
+	poolOfLexems.RedeemLexems(in)
+
+	return strings.Join(out, " ")
+}
+
+// ToJSONName generates a camelized single-word version of a sentence.
+//
+// The output assembles every camelized word, except for the first word, which
+// is lower-cased.
+//
+// Example:
+//
+// - "Hello_swagger" becomes "helloSwagger"
+func (m NameMangler) ToJSONName(name string) string {
+	inptr := m.split(name)
+	in := *inptr
+	out := make([]string, 0, len(in))
+
+	for i, w := range in {
+		if i == 0 {
+			out = append(out, lower(w))
+			continue
+		}
+		out = append(out, m.Camelize(trim(w)))
+	}
+
+	poolOfStrings.RedeemStrings(inptr)
+
+	return strings.Join(out, "")
+}
+
+// ToVarName generates a legit unexported go variable name from a sentence.
+//
+// The generated name plays well with linters (see also [NameMangler.ToGoName]).
+//
+// Examples:
+//
+// - "Hello_swagger" becomes "helloSwagger"
+// - "Http_server" becomes "httpServer"
+//
+// This name applies the same rules as [NameMangler.ToGoName] (legit exported variable), save the
+// capitalization of the initial rune.
+//
+// Special case: when the initial part is recognized as an initialism (like in the example above),
+// the full part is lower-cased.
+func (m NameMangler) ToVarName(name string) string {
+	return m.goIdentifier(name, false)
+}
+
+// ToGoName generates a legit exported go variable name from a sentence.
+//
+// The generated name plays well with most linters.
+//
+// ToGoName abides by the go "exported" symbol rule starting with an upper-case letter.
+//
+// Examples:
+//
+// - "hello_swagger" becomes "HelloSwagger"
+// - "Http_server" becomes "HTTPServer"
+//
+// # Edge cases
+//
+// Whenever the first rune is not eligible to upper case, a special prefix is prepended to the resulting name.
+// By default this is simply "X" and you may customize this behavior using the [WithGoNamePrefixFunc] option.
+//
+// This happens when the first rune is not a letter, e.g. a digit, or a symbol that has no word transliteration
+// (see also [WithReplaceFunc] about symbol transliterations),
+// as well as for most East Asian or Devanagari runes, for which there is no such concept as upper-case.
+//
+// # Linting
+//
+// [revive], the successor of golint, is the reference linter.
+//
+// This means that [NameMangler.ToGoName] supports the initialisms that revive checks (see also [DefaultInitialisms]).
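
The go-identifier converters pair up as follows; the outputs mirror the documented examples, and the digit-led case illustrates the default "X" prefix rule (its exact value is inferred from the edge-case note above, so treat it as illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag/mangling"
)

func main() {
	m := mangling.NewNameMangler()

	fmt.Println(m.ToGoName("Http_server"))     // HTTPServer
	fmt.Println(m.ToVarName("Http_server"))    // httpServer
	fmt.Println(m.ToJSONName("Hello_swagger")) // helloSwagger

	// digit-led input triggers the prefix rule, e.g. X1SesameStreet
	fmt.Println(m.ToGoName("1_sesame_street"))
}
```
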
+//
+// At this moment, there is no attempt to transliterate unicode into ascii, meaning that some linters
+// (e.g. asciicheck, gosmopolitan) may croak on go identifiers generated from unicode input.
+//
+// [revive]: https://github.com/mgechev/revive
+func (m NameMangler) ToGoName(name string) string {
+	return m.goIdentifier(name, true)
+}
+
+func (m NameMangler) goIdentifier(name string, exported bool) string {
+	s := m.splitterWithPostSplit
+	lexems := s.split(name)
+	defer func() {
+		poolOfLexems.RedeemLexems(lexems)
+	}()
+	lexemes := *lexems
+
+	if len(lexemes) == 0 {
+		return ""
+	}
+
+	result := poolOfBuffers.BorrowBuffer(len(name))
+	defer func() {
+		poolOfBuffers.RedeemBuffer(result)
+	}()
+
+	firstPart := lexemes[0]
+	if !exported {
+		if ok := firstPart.WriteLower(result, true); !ok {
+			// NOTE: an initialism as the first part is lower-cased: no longer generates stuff like hTTPxyz.
+			//
+			// same prefixing rule applied to unexported variable as to an exported one, so that we have consistent
+			// names, whether the generated identifier is exported or not.
+			result.WriteString(strings.ToLower(m.prefixFunc()(name)))
+			result.WriteString(lexemes[0].GetOriginal())
+		}
+	} else {
+		if ok := firstPart.WriteTitleized(result, true); !ok {
+			// "repairs" a lexeme that doesn't start with a letter to become
+			// the start of a legit go name. The current strategy is very crude and simply adds a fixed prefix,
+			// e.g. "X".
+			// For instance "1_sesame_street" would be split into lexemes ["1", "sesame", "street"] and
+			// the first one ("1") would result in something like "X1" (with the default prefix function).
+			//
+			// NOTE: no longer forcing the first part to be fully upper-cased
+			result.WriteString(m.prefixFunc()(name))
+			result.WriteString(lexemes[0].GetOriginal())
+		}
+	}
+
+	for _, lexem := range lexemes[1:] {
+		// NOTE: no longer forcing initialism parts to be fully upper-cased:
+		// * pluralized initialisms preserve their trailing "s"
+		// * mixed-cased initialisms, such as IPv4, are preserved
+		if ok := lexem.WriteTitleized(result, false); !ok {
+			// it's not titleized: perhaps it's too short, perhaps the first rune is not a letter.
+			// write anyway
+			result.WriteString(lexem.GetOriginal())
+		}
+	}
+
+	return result.String()
+}
+
+func (m *NameMangler) addInitialisms(words ...string) {
+	m.index.add(words...)
+	m.index.buildCache()
+}
+
+// split calls the inner splitter.
+func (m NameMangler) split(str string) *[]string {
+	s := m.splitter
+	lexems := s.split(str)
+	result := poolOfStrings.BorrowStrings()
+
+	for _, lexem := range *lexems {
+		*result = append(*result, lexem.GetOriginal())
+	}
+	poolOfLexems.RedeemLexems(lexems)
+
+	return result
+}
diff --git a/vendor/github.com/go-openapi/swag/mangling/options.go b/vendor/github.com/go-openapi/swag/mangling/options.go
new file mode 100644
index 00000000000..3c92b2f18bf
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/mangling/options.go
@@ -0,0 +1,150 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package mangling
+
+type (
+	// PrefixFunc defines a safeguard rule (that may depend on the input string), to prefix
+	// a generated go name (in [NameMangler.ToGoName] and [NameMangler.ToVarName]).
+	//
+	// See [NameMangler.ToGoName] for more about which edge cases the prefix function covers.
+	PrefixFunc func(string) string
+
+	// ReplaceFunc is a transliteration function to replace special runes by a word.
+	ReplaceFunc func(r rune) (string, bool)
+
+	// Option to configure a [NameMangler].
+	Option func(*options)
+
+	options struct {
+		commonInitialisms []string
+
+		goNamePrefixFunc    PrefixFunc
+		goNamePrefixFuncPtr *PrefixFunc
+		replaceFunc         func(r rune) (string, bool)
+	}
+)
+
+func (o *options) prefixFunc() PrefixFunc {
+	if o.goNamePrefixFuncPtr != nil && *o.goNamePrefixFuncPtr != nil {
+		return *o.goNamePrefixFuncPtr
+	}
+
+	return o.goNamePrefixFunc
+}
+
+// WithGoNamePrefixFunc overrides the default prefix rule to safeguard generated go names.
+//
+// Example:
+//
+// This helps convert "123" into "{prefix}123" (a very crude strategy indeed, but it works).
+//
+// See [github.com/go-swagger/go-swagger/generator.DefaultFuncMap] for an example.
+//
+// The prefix function is assumed to return a string that starts with an upper case letter.
+//
+// The default is to prefix with "X".
+//
+// See [NameMangler.ToGoName] for more about which edge cases the prefix function covers.
+func WithGoNamePrefixFunc(fn PrefixFunc) Option {
+	return func(o *options) {
+		o.goNamePrefixFunc = fn
+	}
+}
+
+// WithGoNamePrefixFuncPtr is like [WithGoNamePrefixFunc] but it specifies a pointer to a function.
+//
+// [WithGoNamePrefixFunc] should be preferred in most situations. This option should only serve the
+// purpose of handling special situations where the prefix function is not an internal variable
+// (e.g. an exported package global).
+//
+// [WithGoNamePrefixFuncPtr] supersedes [WithGoNamePrefixFunc] if it is also specified.
+//
+// If the provided pointer is nil or points to a nil value, this option has no effect.
+//
+// The caller should ensure that no undesirable concurrent changes are applied to the function pointed to.
+func WithGoNamePrefixFuncPtr(ptr *PrefixFunc) Option {
+	return func(o *options) {
+		o.goNamePrefixFuncPtr = ptr
+	}
+}
+
+// WithInitialisms declares the initialisms this mangler supports.
+//
+// This supersedes any pre-loaded defaults (see [DefaultInitialisms] for more about what initialisms are).
+//
+// It declares words to be recognized as "initialisms" (i.e. words that won't be camel cased or title cased).
+//
+// Words must start with a (unicode) letter. If some don't, they are ignored.
+// Words are either fully capitalized or mixed-cased. Lower-case only words are considered capitalized.
+func WithInitialisms(words ...string) Option {
+	return func(o *options) {
+		o.commonInitialisms = words
+	}
+}
+
+// WithAdditionalInitialisms adds new initialisms to the currently supported list (see [DefaultInitialisms]).
+//
+// The same sanitization rules apply as those described for [WithInitialisms].
+func WithAdditionalInitialisms(words ...string) Option {
+	return func(o *options) {
+		o.commonInitialisms = append(o.commonInitialisms, words...)
+	}
+}
+
+// WithReplaceFunc specifies a custom transliteration function instead of the default.
+//
+// The default translates the following characters into words as follows:
+//
+// - '@' -> 'At'
+// - '&' -> 'And'
+// - '|' -> 'Pipe'
+// - '$' -> 'Dollar'
+// - '!' -> 'Bang'
+//
+// Notice that the outcome of a transliteration should always be titleized.
+func WithReplaceFunc(fn ReplaceFunc) Option {
+	return func(o *options) {
+		o.replaceFunc = fn
+	}
+}
+
+func defaultPrefixFunc(_ string) string {
+	return "X"
+}
+
+// defaultReplaceTable finds a word representation for special characters.
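
Putting the options together, a hedged configuration sketch; the outputs follow from the documented initialism and prefix behavior and are illustrative, not verified against the vendored tests:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag/mangling"
)

func main() {
	m := mangling.NewNameMangler(
		// declare GRPC on top of the default initialisms
		mangling.WithAdditionalInitialisms("GRPC"),
		// replace the default "X" safeguard prefix
		mangling.WithGoNamePrefixFunc(func(string) string { return "Gen" }),
	)

	fmt.Println(m.ToGoName("grpc_server")) // GRPCServer
	fmt.Println(m.ToGoName("2fa_token"))   // prefixed, e.g. Gen2faToken
}
```
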
+func defaultReplaceTable(r rune) (string, bool) {
+	switch r {
+	case '@':
+		return "At ", true
+	case '&':
+		return "And ", true
+	case '|':
+		return "Pipe ", true
+	case '$':
+		return "Dollar ", true
+	case '!':
+		return "Bang ", true
+	case '-':
+		return "", true
+	case '_':
+		return "", true
+	default:
+		return "", false
+	}
+}
+
+func optionsWithDefaults(opts []Option) options {
+	o := options{
+		commonInitialisms: DefaultInitialisms(),
+		goNamePrefixFunc:  defaultPrefixFunc,
+		replaceFunc:       defaultReplaceTable,
+	}
+
+	for _, apply := range opts {
+		apply(&o)
+	}
+
+	return o
+}
diff --git a/vendor/github.com/go-openapi/swag/mangling/pools.go b/vendor/github.com/go-openapi/swag/mangling/pools.go
new file mode 100644
index 00000000000..f8104351445
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/mangling/pools.go
@@ -0,0 +1,123 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package mangling
+
+import (
+	"bytes"
+	"sync"
+)
+
+const maxAllocMatches = 8
+
+type (
+	// memory pools of temporary objects.
+	//
+	// These are used to recycle temporarily allocated objects
+	// and relieve the GC from undue pressure.
+
+	matchesPool struct {
+		*sync.Pool
+	}
+
+	buffersPool struct {
+		*sync.Pool
+	}
+
+	lexemsPool struct {
+		*sync.Pool
+	}
+
+	stringsPool struct {
+		*sync.Pool
+	}
+)
+
+var (
+	// poolOfMatches holds temporary slices for recycling during the initialism match process
+	poolOfMatches = matchesPool{
+		Pool: &sync.Pool{
+			New: func() any {
+				s := make(initialismMatches, 0, maxAllocMatches)
+
+				return &s
+			},
+		},
+	}
+
+	poolOfBuffers = buffersPool{
+		Pool: &sync.Pool{
+			New: func() any {
+				return new(bytes.Buffer)
+			},
+		},
+	}
+
+	poolOfLexems = lexemsPool{
+		Pool: &sync.Pool{
+			New: func() any {
+				s := make([]nameLexem, 0, maxAllocMatches)
+
+				return &s
+			},
+		},
+	}
+
+	poolOfStrings = stringsPool{
+		Pool: &sync.Pool{
+			New: func() any {
+				s := make([]string, 0, maxAllocMatches)
+
+				return &s
+			},
+		},
+	}
+)
+
+func (p matchesPool) BorrowMatches() *initialismMatches {
+	s := p.Get().(*initialismMatches)
+	*s = (*s)[:0] // reset slice, keep allocated capacity
+
+	return s
+}
+
+func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer {
+	s := p.Get().(*bytes.Buffer)
+	s.Reset()
+
+	if s.Cap() < size {
+		s.Grow(size)
+	}
+
+	return s
+}
+
+func (p lexemsPool) BorrowLexems() *[]nameLexem {
+	s := p.Get().(*[]nameLexem)
+	*s = (*s)[:0] // reset slice, keep allocated capacity
+
+	return s
+}
+
+func (p stringsPool) BorrowStrings() *[]string {
+	s := p.Get().(*[]string)
+	*s = (*s)[:0] // reset slice, keep allocated capacity
+
+	return s
+}
+
+func (p matchesPool) RedeemMatches(s *initialismMatches) {
+	p.Put(s)
+}
+
+func (p buffersPool) RedeemBuffer(s *bytes.Buffer) {
+	p.Put(s)
+}
+
+func (p lexemsPool) RedeemLexems(s *[]nameLexem) {
+	p.Put(s)
+}
+
+func (p stringsPool) RedeemStrings(s *[]string) {
+	p.Put(s)
+}
diff --git a/vendor/github.com/go-openapi/swag/mangling/split.go b/vendor/github.com/go-openapi/swag/mangling/split.go
new file mode 100644
index 00000000000..ed12ea25674
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/mangling/split.go
@@ -0,0 +1,341 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package mangling
+
+import (
+	"fmt"
+	"unicode"
+)
+
+type splitterOption func(*splitter)
+
+// withPostSplitInitialismCheck allows catching initialisms after the main split process
+func withPostSplitInitialismCheck(s *splitter)
{
+	s.postSplitInitialismCheck = true
+}
+
+func withReplaceFunc(fn ReplaceFunc) func(*splitter) {
+	return func(s *splitter) {
+		s.replaceFunc = fn
+	}
+}
+
+func withInitialismsCache(c *initialismsCache) splitterOption {
+	return func(s *splitter) {
+		s.initialismsCache = c
+	}
+}
+
+type (
+	initialismMatch struct {
+		body       []rune
+		start, end int
+		complete   bool
+		hasPlural  pluralForm
+	}
+	initialismMatches []initialismMatch
+)
+
+// String representation of a match, e.g. for debugging.
+func (m initialismMatch) String() string {
+	return fmt.Sprintf("{body: %s (%d), start: %d, end: %d, complete: %t, hasPlural: %v}",
+		string(m.body), len(m.body), m.start, m.end, m.complete, m.hasPlural,
+	)
+}
+
+func (m initialismMatch) isZero() bool {
+	return m.start == 0 && m.end == 0
+}
+
+type splitter struct {
+	*initialismsCache
+
+	postSplitInitialismCheck bool
+	replaceFunc              ReplaceFunc
+}
+
+func newSplitter(options ...splitterOption) splitter {
+	var s splitter
+
+	for _, option := range options {
+		option(&s)
+	}
+
+	if s.replaceFunc == nil {
+		s.replaceFunc = defaultReplaceTable
+	}
+
+	return s
+}
+
+func (s splitter) split(name string) *[]nameLexem {
+	nameRunes := []rune(name)
+	matches := s.gatherInitialismMatches(nameRunes)
+	if matches == nil {
+		return poolOfLexems.BorrowLexems()
+	}
+
+	return s.mapMatchesToNameLexems(nameRunes, matches)
+}
+
+func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches {
+	matches := poolOfMatches.BorrowMatches()
+	const minLenInitialism = 1
+	if len(nameRunes) < minLenInitialism+1 {
+		// can't match an initialism with 0 or 1 rune
+		return matches
+	}
+
+	// first iteration
+	s.findMatches(matches, nameRunes, nameRunes[0], 0)
+
+	for i, currentRune := range nameRunes[1:] {
+		currentRunePosition := i + 1
+		// recycle allocations as we loop over runes
+		// with such recycling, only 2 slices should be allocated per call
+		// instead of O(n).
+		//
+		// BorrowMatches always yields slices with zero length (with some capacity)
+		newMatches := poolOfMatches.BorrowMatches()
+
+		// check current initialism matches
+		for _, match := range *matches {
+			if keepCompleteMatch := match.complete; keepCompleteMatch {
+				// the match is already complete: keep it then move on to the next match
+				*newMatches = append(*newMatches, match)
+				continue
+			}
+
+			if currentRunePosition-match.start == len(match.body) {
+				// unmatched: skip
+				continue
+			}
+
+			// 1. by construction of the matches, we can't have currentRunePosition - match.start < 0
+			// because matches have been computed with their start <= currentRunePosition in the previous
+			// iterations.
+			// 2. by construction of the matches, we can't have currentRunePosition - match.start >= len(match.body)
+
+			currentMatchRune := match.body[currentRunePosition-match.start]
+			if currentMatchRune != currentRune {
+				// failed match, discard it then move on to the next match
+				continue
+			}
+
+			// try to complete the current match
+			if currentRunePosition-match.start == len(match.body)-1 {
+				// we are close: the next step is to check the symbol ahead
+				// if it is a lowercase letter, then it is not the end of match
+				// but the beginning of the next word.
+				//
+				// NOTE(fredbi): this heuristic sometimes leads to counterintuitive splits and
+				// perhaps (not sure yet) we should check against case _alternation_.
+				//
+				// Example:
+				//
+				// In the current version, in the sentence "IDS initialism", "ID" is recognized as an initialism,
+				// leading to a split like "id_s_initialism" (or IDSInitialism),
+				// whereas in the sentence "IDx initialism", it is not and produces something like
+				// "i_d_x_initialism" (or IDxInitialism). The generated file name is not great.
+				//
+				// Both go identifiers are tolerated by linters.
+				//
+				// Notice that the slightly different input "IDs initialism" is correctly detected
+				// as a pluralized initialism and produces something like "ids_initialism" (or IDsInitialism).

+				if currentRunePosition < len(nameRunes)-1 { // when before the last rune
+					nextRune := nameRunes[currentRunePosition+1]
+
+					// recognize a plural form for this initialism (only simple English pluralization is supported).
+					if nextRune == 's' && match.hasPlural == simplePlural {
+						// detected a pluralized initialism
+						match.body = append(match.body, nextRune)
+						lookAhead := currentRunePosition + 1
+						if lookAhead < len(nameRunes)-1 {
+							nextRune = nameRunes[lookAhead+1]
+							if newWord := unicode.IsLower(nextRune); newWord {
+								// it is the start of a new word.
+								// Match is only partial and the initialism is not recognized:
+								// move on to the next match, but do not advance the rune position
+								continue
+							}
+						}
+
+						// this is a pluralized match: keep it
+						currentRunePosition++
+						match.complete = true
+						match.hasPlural = simplePlural
+						match.end = currentRunePosition
+						*newMatches = append(*newMatches, match)
+
+						// match is complete: keep it then move on to the next match
+						continue
+					}
+
+					// other cases
+					// example: invariant plural such as "TLS"
+					if newWord := unicode.IsLower(nextRune); newWord {
+						// it is the start of a new word
+						// Match is only partial and the initialism is not recognized: move on
+						continue
+					}
+				}
+
+				match.complete = true
+				match.end = currentRunePosition
+			}
+
+			// append the ongoing matching attempt: it is not necessarily complete, but was successful so far.
+			// Let's see if it still matches on the next rune.
+			*newMatches = append(*newMatches, match)
+		}
+
+		s.findMatches(newMatches, nameRunes, currentRune, currentRunePosition)
+
+		poolOfMatches.RedeemMatches(matches)
+		matches = newMatches
+	}
+
+	// it is up to the caller to redeem this last slice
+	return matches
+}
+
+func (s splitter) findMatches(newMatches *initialismMatches, nameRunes []rune, currentRune rune, currentRunePosition int) {
+	// check for new initialism matches, based on the first character
+	for i, r := range s.initialismsRunes {
+		if r[0] != currentRune {
+			continue
+		}
+
+		if currentRunePosition+len(r) > len(nameRunes) {
+			continue // not eligible: would spill over the initial string
+		}
+
+		// possible matches: all initialisms starting with the current rune and that can fit the given string (nameRunes)
+		*newMatches = append(*newMatches, initialismMatch{
+			start:     currentRunePosition,
+			body:      r,
+			complete:  false,
+			hasPlural: s.initialismsPluralForm[i],
+		})
+	}
+}
+
+func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem {
+	nameLexems := poolOfLexems.BorrowLexems()
+
+	var lastAcceptedMatch initialismMatch
+	for _, match := range *matches {
+		if !match.complete {
+			continue
+		}
+
+		if firstMatch := lastAcceptedMatch.isZero(); firstMatch {
+			s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start])
+			*nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
+
+			lastAcceptedMatch = match
+
+			continue
+		}
+
+		if overlappedMatch := match.start <= lastAcceptedMatch.end; overlappedMatch {
+			continue
+		}
+
+		middle := nameRunes[lastAcceptedMatch.end+1 : match.start]
+		s.appendBrokenDownCasualString(nameLexems, middle)
+		*nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
+
+		lastAcceptedMatch = match
+	}
+
+	// we have not found any accepted matches
+	if lastAcceptedMatch.isZero() {
+		*nameLexems = (*nameLexems)[:0]
+		s.appendBrokenDownCasualString(nameLexems, nameRunes)
+	} else if lastAcceptedMatch.end+1 != len(nameRunes) {
+		rest := nameRunes[lastAcceptedMatch.end+1:]
+		s.appendBrokenDownCasualString(nameLexems, rest)
+	}
+
+	poolOfMatches.RedeemMatches(matches)
+
+	return nameLexems
+}
+
+func (s splitter) breakInitialism(original string) nameLexem {
+	return newInitialismNameLexem(original, original)
+}
+
+func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) {
+	currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, a bytes.Buffer's initial storage can be reused
+	defer func() {
+		poolOfBuffers.RedeemBuffer(currentSegment)
+	}()
+
+	addCasualNameLexem := func(original string) {
+		*segments = append(*segments, newCasualNameLexem(original))
+	}
+
+	addInitialismNameLexem := func(original, match string) {
+		*segments = append(*segments, newInitialismNameLexem(original, match))
+	}
+
+	var addNameLexem func(string)
+	if s.postSplitInitialismCheck {
+		addNameLexem = func(original string) {
+			for i := range s.initialisms {
+				if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) {
+					addInitialismNameLexem(original, s.initialisms[i])
+
+					return
+				}
+			}
+
+			addCasualNameLexem(original)
+		}
+	} else {
+		addNameLexem = addCasualNameLexem
+	}
+
+	// NOTE: (performance).
The few remaining non-amortized allocations
+	// lie in the code below: using String() forces a copy of the buffer's
+	// content into a newly allocated string.
+	for _, rn := range str {
+		if replace, found := s.replaceFunc(rn); found {
+			if currentSegment.Len() > 0 {
+				addNameLexem(currentSegment.String())
+				currentSegment.Reset()
+			}
+
+			if replace != "" {
+				addNameLexem(replace)
+			}
+
+			continue
+		}
+
+		if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
+			if currentSegment.Len() > 0 {
+				addNameLexem(currentSegment.String())
+				currentSegment.Reset()
+			}
+
+			continue
+		}
+
+		if unicode.IsUpper(rn) {
+			if currentSegment.Len() > 0 {
+				addNameLexem(currentSegment.String())
+			}
+			currentSegment.Reset()
+		}
+
+		currentSegment.WriteRune(rn)
+	}
+
+	if currentSegment.Len() > 0 {
+		addNameLexem(currentSegment.String())
+	}
+}
diff --git a/vendor/github.com/go-openapi/swag/mangling/string_bytes.go b/vendor/github.com/go-openapi/swag/mangling/string_bytes.go
new file mode 100644
index 00000000000..28daaf72b1a
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/mangling/string_bytes.go
@@ -0,0 +1,11 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package mangling
+
+import "unsafe"
+
+// hackStringBytes returns the (unsafe) underlying bytes slice of a string.
+func hackStringBytes(str string) []byte {
+	return unsafe.Slice(unsafe.StringData(str), len(str))
+}
diff --git a/vendor/github.com/go-openapi/swag/mangling/util.go b/vendor/github.com/go-openapi/swag/mangling/util.go
new file mode 100644
index 00000000000..0636417e360
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/mangling/util.go
@@ -0,0 +1,118 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package mangling
+
+import (
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// trim removes leading and trailing whitespace
+func trim(str string) string { return strings.TrimSpace(str) }
+
+// upper is strings.ToUpper() combined with trim
+func upper(str string) string {
+	return strings.ToUpper(trim(str))
+}
+
+// lower is strings.ToLower() combined with trim
+func lower(str string) string {
+	return strings.ToLower(trim(str))
+}
+
+// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but
+// it ignores leading and trailing blank spaces in the compared
+// string.
+//
+// base is assumed to be composed of upper-cased runes, and to be
+// already trimmed.
+//
+// This code is heavily inspired by strings.EqualFold.
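
In spirit, the helper documented above is a zero-copy variant of trimming followed by a case-insensitive comparison. A naive, allocating equivalent for illustration only (not the vendored implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// naiveEqualFoldIgnoreSpace expresses the same semantics with the standard
// library, at the cost of the conversions the vendored helper avoids.
func naiveEqualFoldIgnoreSpace(base []rune, str string) bool {
	return strings.EqualFold(string(base), strings.TrimSpace(str))
}

func main() {
	fmt.Println(naiveEqualFoldIgnoreSpace([]rune("HTTP"), "  http\t")) // true
	fmt.Println(naiveEqualFoldIgnoreSpace([]rune("HTTP"), "http s"))   // false
}
```
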
+func isEqualFoldIgnoreSpace(base []rune, str string) bool {
+	var i, baseIndex int
+	// equivalent to b := []byte(str), but without data copy
+	b := hackStringBytes(str)
+
+	for i < len(b) {
+		if c := b[i]; c < utf8.RuneSelf {
+			// fast path for ASCII
+			if c != ' ' && c != '\t' {
+				break
+			}
+			i++
+
+			continue
+		}
+
+		// unicode case
+		r, size := utf8.DecodeRune(b[i:])
+		if !unicode.IsSpace(r) {
+			break
+		}
+		i += size
+	}
+
+	if i >= len(b) {
+		return len(base) == 0
+	}
+
+	for _, baseRune := range base {
+		if i >= len(b) {
+			break
+		}
+
+		if c := b[i]; c < utf8.RuneSelf {
+			// single byte rune case (ASCII)
+			if baseRune >= utf8.RuneSelf {
+				return false
+			}
+
+			baseChar := byte(baseRune)
+			if c != baseChar && ((c < 'a') || (c > 'z') || (c-'a'+'A' != baseChar)) {
+				return false
+			}
+
+			baseIndex++
+			i++
+
+			continue
+		}
+
+		// unicode case
+		r, size := utf8.DecodeRune(b[i:])
+		if unicode.ToUpper(r) != baseRune {
+			return false
+		}
+		baseIndex++
+		i += size
+	}
+
+	if baseIndex != len(base) {
+		return false
+	}
+
+	// all passed: now we should only have blanks
+	for i < len(b) {
+		if c := b[i]; c < utf8.RuneSelf {
+			// fast path for ASCII
+			if c != ' ' && c != '\t' {
+				return false
+			}
+			i++
+
+			continue
+		}
+
+		// unicode case
+		r, size := utf8.DecodeRune(b[i:])
+		if !unicode.IsSpace(r) {
+			return false
+		}
+
+		i += size
+	}
+
+	return true
+}
diff --git a/vendor/github.com/go-openapi/swag/mangling_iface.go b/vendor/github.com/go-openapi/swag/mangling_iface.go
new file mode 100644
index 00000000000..98b9a999293
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/mangling_iface.go
@@ -0,0 +1,69 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package swag
+
+import "github.com/go-openapi/swag/mangling"
+
+// GoNamePrefixFunc sets an optional rule to prefix go names
+// which do not start with a letter.
+//
+// GoNamePrefixFunc should not be written to while concurrently using the other mangling functions of this package.
+//
+// Deprecated: use [mangling.WithGoNamePrefixFunc] instead.
+var GoNamePrefixFunc mangling.PrefixFunc
+
+// swagNameMangler is a global instance of the name mangler specifically allotted
+// to support deprecated functions.
+var swagNameMangler = mangling.NewNameMangler(
+	mangling.WithGoNamePrefixFuncPtr(&GoNamePrefixFunc),
+)
+
+// AddInitialisms adds additional initialisms to the default list (see [mangling.DefaultInitialisms]).
+//
+// AddInitialisms is not safe to call concurrently.
+//
+// Deprecated: use [mangling.WithAdditionalInitialisms] instead.
+func AddInitialisms(words ...string) {
+	swagNameMangler.AddInitialisms(words...)
+}
+
+// Camelize a single word.
+//
+// Deprecated: use [mangling.NameMangler.Camelize] instead.
+func Camelize(word string) string { return swagNameMangler.Camelize(word) }
+
+// ToFileName lowercases and underscores a go type name.
+//
+// Deprecated: use [mangling.NameMangler.ToFileName] instead.
+func ToFileName(name string) string { return swagNameMangler.ToFileName(name) }
+
+// ToCommandName lowercases and underscores a go type name.
+//
+// Deprecated: use [mangling.NameMangler.ToCommandName] instead.
+func ToCommandName(name string) string { return swagNameMangler.ToCommandName(name) }
+
+// ToHumanNameLower represents a code name as a human-readable series of words.
+//
+// Deprecated: use [mangling.NameMangler.ToHumanNameLower] instead.
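
A migration sketch from the deprecated package-level helpers to an owned mangler instance; both paths use default settings here, so the outputs should match:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
	"github.com/go-openapi/swag/mangling"
)

func main() {
	// deprecated path: the shared, package-level mangler
	legacy := swag.ToGoName("hello_swagger")

	// preferred path: an explicitly constructed instance
	m := mangling.NewNameMangler()
	current := m.ToGoName("hello_swagger")

	fmt.Println(legacy, current) // HelloSwagger HelloSwagger
}
```
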
+func ToHumanNameLower(name string) string { return swagNameMangler.ToHumanNameLower(name) }
+
+// ToHumanNameTitle represents a code name as a human-readable series of words with the first letters titleized.
+//
+// Deprecated: use [mangling.NameMangler.ToHumanNameTitle] instead.
+func ToHumanNameTitle(name string) string { return swagNameMangler.ToHumanNameTitle(name) }
+
+// ToJSONName camel-cases a name which can be underscored or pascal-cased.
+//
+// Deprecated: use [mangling.NameMangler.ToJSONName] instead.
+func ToJSONName(name string) string { return swagNameMangler.ToJSONName(name) }
+
+// ToVarName camel-cases a name which can be underscored or pascal-cased.
+//
+// Deprecated: use [mangling.NameMangler.ToVarName] instead.
+func ToVarName(name string) string { return swagNameMangler.ToVarName(name) }
+
+// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes.
+//
+// Deprecated: use [mangling.NameMangler.ToGoName] instead.
+func ToGoName(name string) string { return swagNameMangler.ToGoName(name) }
diff --git a/vendor/github.com/go-openapi/swag/netutils/LICENSE b/vendor/github.com/go-openapi/swag/netutils/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/netutils/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/netutils/doc.go b/vendor/github.com/go-openapi/swag/netutils/doc.go new file mode 100644 index 00000000000..74282f8e51c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/netutils/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package netutils provides helpers for network-related tasks. +package netutils diff --git a/vendor/github.com/go-openapi/swag/netutils/net.go b/vendor/github.com/go-openapi/swag/netutils/net.go new file mode 100644 index 00000000000..82a1544af7b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/netutils/net.go @@ -0,0 +1,31 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package netutils + +import ( + "net" + "strconv" +) + +// SplitHostPort splits a network address into a host and a port. 
+// +// The difference with the standard net.SplitHostPort is that the port is converted to an int. +// +// The port is -1 when there is no port to be found. +func SplitHostPort(addr string) (host string, port int, err error) { + h, p, err := net.SplitHostPort(addr) + if err != nil { + return "", -1, err + } + if p == "" { + return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr} + } + + pi, err := strconv.Atoi(p) + if err != nil { + return "", -1, err + } + + return h, pi, nil +} diff --git a/vendor/github.com/go-openapi/swag/netutils_iface.go b/vendor/github.com/go-openapi/swag/netutils_iface.go new file mode 100644 index 00000000000..d658de25b3f --- /dev/null +++ b/vendor/github.com/go-openapi/swag/netutils_iface.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import "github.com/go-openapi/swag/netutils" + +// SplitHostPort splits a network address into a host and a port. +// +// Deprecated: use [netutils.SplitHostPort] instead. +func SplitHostPort(addr string) (host string, port int, err error) { + return netutils.SplitHostPort(addr) +} diff --git a/vendor/github.com/go-openapi/swag/stringutils/LICENSE b/vendor/github.com/go-openapi/swag/stringutils/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/stringutils/collection_formats.go b/vendor/github.com/go-openapi/swag/stringutils/collection_formats.go new file mode 100644 index 00000000000..28056ad25c3 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils/collection_formats.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package stringutils + +import "strings" + +const ( + // collectionFormatComma = "csv" + collectionFormatSpace = "ssv" + collectionFormatTab = "tsv" + collectionFormatPipe = "pipes" + collectionFormatMulti = "multi" + + collectionFormatDefaultSep = "," +) + +// JoinByFormat joins a string array by a known format (e.g. 
swagger's collectionFormat attribute): +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func JoinByFormat(data []string, format string) []string { + if len(data) == 0 { + return data + } + var sep string + switch format { + case collectionFormatSpace: + sep = " " + case collectionFormatTab: + sep = "\t" + case collectionFormatPipe: + sep = "|" + case collectionFormatMulti: + return data + default: + sep = collectionFormatDefaultSep + } + return []string{strings.Join(data, sep)} +} + +// SplitByFormat splits a string by a known format: +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func SplitByFormat(data, format string) []string { + if data == "" { + return nil + } + var sep string + switch format { + case collectionFormatSpace: + sep = " " + case collectionFormatTab: + sep = "\t" + case collectionFormatPipe: + sep = "|" + case collectionFormatMulti: + return nil + default: + sep = collectionFormatDefaultSep + } + var result []string + for _, s := range strings.Split(data, sep) { + if ts := strings.TrimSpace(s); ts != "" { + result = append(result, ts) + } + } + return result +} diff --git a/vendor/github.com/go-openapi/swag/stringutils/doc.go b/vendor/github.com/go-openapi/swag/stringutils/doc.go new file mode 100644 index 00000000000..c6d17a1160b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package stringutils exposes helpers to search and process strings. +package stringutils diff --git a/vendor/github.com/go-openapi/swag/stringutils/strings.go b/vendor/github.com/go-openapi/swag/stringutils/strings.go new file mode 100644 index 00000000000..cd792b7d083 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils/strings.go @@ -0,0 +1,23 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package stringutils + +import ( + "slices" + "strings" +) + +// ContainsStrings searches a slice of strings for a case-sensitive match. +// +// Now equivalent to the standard library [slices.Contains]. +func ContainsStrings(coll []string, item string) bool { + return slices.Contains(coll, item) +} + +// ContainsStringsCI searches a slice of strings for a case-insensitive match. +func ContainsStringsCI(coll []string, item string) bool { + return slices.ContainsFunc(coll, func(e string) bool { + return strings.EqualFold(e, item) + }) +} diff --git a/vendor/github.com/go-openapi/swag/stringutils_iface.go b/vendor/github.com/go-openapi/swag/stringutils_iface.go new file mode 100644 index 00000000000..dbfa4848430 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/stringutils_iface.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import "github.com/go-openapi/swag/stringutils" + +// ContainsStrings searches a slice of strings for a case-sensitive match. +// +// Deprecated: use [slices.Contains] or [stringutils.ContainsStrings] instead. +func ContainsStrings(coll []string, item string) bool { + return stringutils.ContainsStrings(coll, item) +} + +// ContainsStringsCI searches a slice of strings for a case-insensitive match. +// +// Deprecated: use [stringutils.ContainsStringsCI] instead.
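+// +// A minimal usage sketch (illustrative, not from the upstream docs); matching +// delegates to [stringutils.ContainsStringsCI], which uses [strings.EqualFold]: +// +//	swag.ContainsStringsCI([]string{"Foo", "Bar"}, "FOO") // true +//	swag.ContainsStringsCI([]string{"Foo", "Bar"}, "Baz") // false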
+func ContainsStringsCI(coll []string, item string) bool { + return stringutils.ContainsStringsCI(coll, item) +} + +// JoinByFormat joins a string array by a known format (e.g. swagger's collectionFormat attribute). +// +// Deprecated: use [stringutils.JoinByFormat] instead. +func JoinByFormat(data []string, format string) []string { + return stringutils.JoinByFormat(data, format) +} + +// SplitByFormat splits a string by a known format. +// +// Deprecated: use [stringutils.SplitByFormat] instead. +func SplitByFormat(data, format string) []string { + return stringutils.SplitByFormat(data, format) +} diff --git a/vendor/github.com/go-openapi/swag/typeutils/LICENSE b/vendor/github.com/go-openapi/swag/typeutils/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/typeutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/typeutils/doc.go b/vendor/github.com/go-openapi/swag/typeutils/doc.go new file mode 100644 index 00000000000..66bed20dff0 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/typeutils/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package typeutils exposes utilities to inspect generic types. +package typeutils diff --git a/vendor/github.com/go-openapi/swag/typeutils/types.go b/vendor/github.com/go-openapi/swag/typeutils/types.go new file mode 100644 index 00000000000..55487a673c4 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/typeutils/types.go @@ -0,0 +1,80 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package typeutils + +import "reflect" + +type zeroable interface { + IsZero() bool +} + +// IsZero returns true when the value passed into the function is a zero value. +// This allows for safer checking of interface values. +func IsZero(data any) bool { + v := reflect.ValueOf(data) + // check for nil data + switch v.Kind() { //nolint:exhaustive + case + reflect.Interface, + reflect.Func, + reflect.Chan, + reflect.Pointer, + reflect.UnsafePointer, + reflect.Map, + reflect.Slice: + if v.IsNil() { + return true + } + } + + // check for things that have an IsZero method instead + if vv, ok := data.(zeroable); ok { + return vv.IsZero() + } + + // continue with slightly more complex reflection + switch v.Kind() { //nolint:exhaustive + case reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Struct, reflect.Array: + return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) + case reflect.Invalid: + return true + default: + return false + } +} + +// IsNil checks if input is nil. +// +// For types chan, func, interface, map, pointer, or slice it returns true if its argument is nil. +// +// See [reflect.Value.IsNil]. 
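+// +// A minimal sketch of why this is safer than a plain == nil comparison +// (illustrative example, not from the upstream docs): an interface value that +// boxes a typed nil pointer compares as non-nil, while IsNil reflects through it. +// +//	var p *int +//	var v any = p +//	_ = v == nil // false: v carries the concrete type *int +//	_ = IsNil(v) // true: the boxed pointer itself is nil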
+func IsNil(input any) bool { + if input == nil { + return true + } + + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Pointer, + reflect.UnsafePointer, + reflect.Map, + reflect.Slice, + reflect.Chan, + reflect.Interface, + reflect.Func: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} diff --git a/vendor/github.com/go-openapi/swag/typeutils_iface.go b/vendor/github.com/go-openapi/swag/typeutils_iface.go new file mode 100644 index 00000000000..b63813ea408 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/typeutils_iface.go @@ -0,0 +1,12 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import "github.com/go-openapi/swag/typeutils" + +// IsZero returns true when the value passed into the function is a zero value. +// This allows for safer checking of interface values. +// +// Deprecated: use [typeutils.IsZero] instead. +func IsZero(data any) bool { return typeutils.IsZero(data) } diff --git a/vendor/github.com/go-openapi/swag/yamlutils/LICENSE b/vendor/github.com/go-openapi/swag/yamlutils/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/yamlutils/doc.go b/vendor/github.com/go-openapi/swag/yamlutils/doc.go new file mode 100644 index 00000000000..7bb92a82f1b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/doc.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package yamlutils provides utilities to work with YAML documents. 
+// +// - [BytesToYAMLDoc] to construct a [yaml.Node] document +// - [YAMLToJSON] to convert a [yaml.Node] document to JSON bytes +// - [YAMLMapSlice] to serialize and deserialize YAML with the order of keys maintained +package yamlutils + +import ( + _ "go.yaml.in/yaml/v3" // for documentation purpose only +) diff --git a/vendor/github.com/go-openapi/swag/yamlutils/errors.go b/vendor/github.com/go-openapi/swag/yamlutils/errors.go new file mode 100644 index 00000000000..e87bc5e8beb --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/errors.go @@ -0,0 +1,15 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package yamlutils + +type yamlError string + +const ( + // ErrYAML is an error raised by YAML utilities + ErrYAML yamlError = "yaml error" +) + +func (e yamlError) Error() string { + return string(e) +} diff --git a/vendor/github.com/go-openapi/swag/yamlutils/ordered_map.go b/vendor/github.com/go-openapi/swag/yamlutils/ordered_map.go new file mode 100644 index 00000000000..3daf68dbba0 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/ordered_map.go @@ -0,0 +1,316 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package yamlutils + +import ( + "fmt" + "iter" + "slices" + "sort" + "strconv" + + "github.com/go-openapi/swag/conv" + "github.com/go-openapi/swag/jsonutils" + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" + "github.com/go-openapi/swag/typeutils" + yaml "go.yaml.in/yaml/v3" +) + +var ( + _ yaml.Marshaler = YAMLMapSlice{} + _ yaml.Unmarshaler = &YAMLMapSlice{} +) + +// YAMLMapSlice represents a YAML object, with the order of keys maintained. +// +// It is similar to [jsonutils.JSONMapSlice] and also knows how to marshal and unmarshal YAML. +// +// It behaves like an ordered map, but keys can't be accessed in constant time. +type YAMLMapSlice []YAMLMapItem + +// YAMLMapItem represents the value of a key in a YAML object held by [YAMLMapSlice]. +// +// It is entirely equivalent to [jsonutils.JSONMapItem], with the same limitation that +// you should not Marshal or Unmarshal directly this type, outside of a [YAMLMapSlice]. +type YAMLMapItem = jsonutils.JSONMapItem + +func (s YAMLMapSlice) OrderedItems() iter.Seq2[string, any] { + return func(yield func(string, any) bool) { + for _, item := range s { + if !yield(item.Key, item.Value) { + return + } + } + } +} + +// SetOrderedItems implements [ifaces.SetOrdered]: it merges keys passed by the iterator argument +// into the [YAMLMapSlice]. +func (s *YAMLMapSlice) SetOrderedItems(items iter.Seq2[string, any]) { + if items == nil { + // force receiver to be a nil slice + *s = nil + + return + } + + m := *s + if len(m) > 0 { + // update mode: short-circuited when unmarshaling fresh data structures + idx := make(map[string]int, len(m)) + + for i, item := range m { + idx[item.Key] = i + } + + for k, v := range items { + idx, ok := idx[k] + if ok { + m[idx].Value = v + + continue + } + + m = append(m, YAMLMapItem{Key: k, Value: v}) + } + + *s = m + + return + } + + for k, v := range items { + m = append(m, YAMLMapItem{Key: k, Value: v}) + } + + *s = m +} + +// MarshalJSON renders this YAML object as JSON bytes. +// +// The difference with standard JSON marshaling is that the order of keys is maintained. +func (s YAMLMapSlice) MarshalJSON() ([]byte, error) { + return jsonutils.JSONMapSlice(s).MarshalJSON() +} + +// UnmarshalJSON builds this YAML object from JSON bytes. 
+// +// The difference with standard JSON marshaling is that the order of keys is maintained. +func (s *YAMLMapSlice) UnmarshalJSON(data []byte) error { + js := jsonutils.JSONMapSlice(*s) + + if err := js.UnmarshalJSON(data); err != nil { + return err + } + + *s = YAMLMapSlice(js) + + return nil +} + +// MarshalYAML produces a YAML document as bytes +// +// The difference with standard YAML marshaling is that the order of keys is maintained. +// +// It implements [yaml.Marshaler]. +func (s YAMLMapSlice) MarshalYAML() (any, error) { + if typeutils.IsNil(s) { + return []byte("null\n"), nil + } + var n yaml.Node + n.Kind = yaml.DocumentNode + var nodes []*yaml.Node + + for _, item := range s { + nn, err := json2yaml(item.Value) + if err != nil { + return nil, err + } + + ns := []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: item.Key, + }, + nn, + } + nodes = append(nodes, ns...) + } + + n.Content = []*yaml.Node{ + { + Kind: yaml.MappingNode, + Content: nodes, + }, + } + + return yaml.Marshal(&n) +} + +// UnmarshalYAML builds a YAMLMapSlice object from a YAML document [yaml.Node]. +// +// It implements [yaml.Unmarshaler]. +func (s *YAMLMapSlice) UnmarshalYAML(node *yaml.Node) error { + if typeutils.IsNil(*s) { + // allow to unmarshal with a simple var declaration (nil slice) + *s = YAMLMapSlice{} + } + if node == nil { + *s = nil + return nil + } + + const sensibleAllocDivider = 2 + m := slices.Grow(*s, len(node.Content)/sensibleAllocDivider) + m = m[:0] + + for i := 0; i < len(node.Content); i += 2 { + var nmi YAMLMapItem + k, err := yamlStringScalarC(node.Content[i]) + if err != nil { + return fmt.Errorf("unable to decode YAML map key: %w: %w", err, ErrYAML) + } + nmi.Key = k + v, err := yamlNode(node.Content[i+1]) + if err != nil { + return fmt.Errorf("unable to process YAML map value for key %q: %w: %w", k, err, ErrYAML) + } + nmi.Value = v + m = append(m, nmi) + } + + *s = m + + return nil +} + +func json2yaml(item any) (*yaml.Node, error) { + if typeutils.IsNil(item) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Value: "null", + }, nil + } + + switch val := item.(type) { + case ifaces.Ordered: + return orderedYAML(val) + + case map[string]any: + var n yaml.Node + n.Kind = yaml.MappingNode + keys := make([]string, 0, len(val)) + for k := range val { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := val[k] + childNode, err := json2yaml(v) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: k, + }, childNode) + } + return &n, nil + + case []any: + var n yaml.Node + n.Kind = yaml.SequenceNode + for i := range val { + childNode, err := json2yaml(val[i]) + if err != nil { + return nil, err + } + n.Content = append(n.Content, childNode) + } + return &n, nil + case string: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val, + }, nil + case float32: + return floatNode(val) + case float64: + return floatNode(val) + case int: + return integerNode(val) + case int8: + return integerNode(val) + case int16: + return integerNode(val) + case int32: + return integerNode(val) + case int64: + return integerNode(val) + case uint: + return uintegerNode(val) + case uint8: + return uintegerNode(val) + case uint16: + return uintegerNode(val) + case uint32: + return uintegerNode(val) + case uint64: + return uintegerNode(val) + case bool: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlBoolScalar, + Value: 
strconv.FormatBool(val), + }, nil + default: + return nil, fmt.Errorf("unhandled type: %T: %w", val, ErrYAML) + } +} + +func floatNode[T conv.Float](val T) (*yaml.Node, error) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlFloatScalar, + Value: conv.FormatFloat(val), + }, nil +} + +func integerNode[T conv.Signed](val T) (*yaml.Node, error) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: conv.FormatInteger(val), + }, nil +} + +func uintegerNode[T conv.Unsigned](val T) (*yaml.Node, error) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: conv.FormatUinteger(val), + }, nil +} + +func orderedYAML[T ifaces.Ordered](val T) (*yaml.Node, error) { + var n yaml.Node + n.Kind = yaml.MappingNode + for key, value := range val.OrderedItems() { + childNode, err := json2yaml(value) + if err != nil { + return nil, err + } + + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: key, + }, childNode) + } + return &n, nil +} diff --git a/vendor/github.com/go-openapi/swag/yamlutils/yaml.go b/vendor/github.com/go-openapi/swag/yamlutils/yaml.go new file mode 100644 index 00000000000..e3aff3c2fde --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils/yaml.go @@ -0,0 +1,211 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package yamlutils + +import ( + json "encoding/json" + "fmt" + "strconv" + + "github.com/go-openapi/swag/jsonutils" + yaml "go.yaml.in/yaml/v3" ) + +// YAMLToJSON converts a YAML document into JSON bytes. +// +// Note: a YAML document is the output from a [yaml.Marshaler], e.g. a pointer to a [yaml.Node]. +// +// [YAMLToJSON] is typically called after [BytesToYAMLDoc]. +func YAMLToJSON(value any) (json.RawMessage, error) { + jm, err := transformData(value) + if err != nil { + return nil, err + } + + b, err := jsonutils.WriteJSON(jm) + + return json.RawMessage(b), err +} + +// BytesToYAMLDoc converts a byte slice into a YAML document. +// +// This function only supports root documents that are objects. +// +// A YAML document is a pointer to a [yaml.Node].
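+// +// A hypothetical round trip with [YAMLToJSON] (sketch only, error handling +// elided; the order of keys is preserved in the resulting JSON): +// +//	doc, _ := BytesToYAMLDoc([]byte("a: 1\nb: two")) +//	raw, _ := YAMLToJSON(doc) // json.RawMessage(`{"a":1,"b":"two"}`)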
+func BytesToYAMLDoc(data []byte) (any, error) { + var document yaml.Node // preserve order that is present in the document + if err := yaml.Unmarshal(data, &document); err != nil { + return nil, err + } + if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { + return nil, fmt.Errorf("only YAML documents that are objects are supported: %w", ErrYAML) + } + return &document, nil +} + +func yamlNode(root *yaml.Node) (any, error) { + switch root.Kind { + case yaml.DocumentNode: + return yamlDocument(root) + case yaml.SequenceNode: + return yamlSequence(root) + case yaml.MappingNode: + return yamlMapping(root) + case yaml.ScalarNode: + return yamlScalar(root) + case yaml.AliasNode: + return yamlNode(root.Alias) + default: + return nil, fmt.Errorf("unsupported YAML node type: %v: %w", root.Kind, ErrYAML) + } +} + +func yamlDocument(node *yaml.Node) (any, error) { + if len(node.Content) != 1 { + return nil, fmt.Errorf("unexpected YAML Document node content length: %d: %w", len(node.Content), ErrYAML) + } + return yamlNode(node.Content[0]) +} + +func yamlMapping(node *yaml.Node) (any, error) { + const sensibleAllocDivider = 2 // nodes concatenate (key,value) sequences + m := make(YAMLMapSlice, len(node.Content)/sensibleAllocDivider) + + if err := m.UnmarshalYAML(node); err != nil { + return nil, err + } + + return m, nil +} + +func yamlSequence(node *yaml.Node) (any, error) { + s := make([]any, 0) + + for i := range len(node.Content) { + v, err := yamlNode(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML sequence value: %w: %w", err, ErrYAML) + } + s = append(s, v) + } + return s, nil +} + +const ( // See https://yaml.org/type/ + yamlStringScalar = "tag:yaml.org,2002:str" + yamlIntScalar = "tag:yaml.org,2002:int" + yamlBoolScalar = "tag:yaml.org,2002:bool" + yamlFloatScalar = "tag:yaml.org,2002:float" + yamlTimestamp = "tag:yaml.org,2002:timestamp" + yamlNull = "tag:yaml.org,2002:null" +) + +func yamlScalar(node *yaml.Node) (any, error) { + switch node.LongTag() { + case yamlStringScalar: + return node.Value, nil + case yamlBoolScalar: + b, err := strconv.ParseBool(node.Value) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w: %w", node.Value, err, ErrYAML) + } + return b, nil + case yamlIntScalar: + i, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w: %w", node.Value, err, ErrYAML) + } + return i, nil + case yamlFloatScalar: + f, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. 
Expecting float content: %w: %w", node.Value, err, ErrYAML) + } + return f, nil + case yamlTimestamp: + // YAML timestamp is marshaled as string, not time + return node.Value, nil + case yamlNull: + return nil, nil //nolint:nilnil + default: + return nil, fmt.Errorf("YAML tag %q is not supported: %w", node.LongTag(), ErrYAML) + } +} + +func yamlStringScalarC(node *yaml.Node) (string, error) { + if node.Kind != yaml.ScalarNode { + return "", fmt.Errorf("expecting a string scalar but got %q: %w", node.Kind, ErrYAML) + } + switch node.LongTag() { + case yamlStringScalar, yamlIntScalar, yamlFloatScalar: + return node.Value, nil + default: + return "", fmt.Errorf("YAML tag %q is not supported as map key: %w", node.LongTag(), ErrYAML) + } +} + +func format(t any) (string, error) { + switch k := t.(type) { + case string: + return k, nil + case uint: + return strconv.FormatUint(uint64(k), 10), nil + case uint8: + return strconv.FormatUint(uint64(k), 10), nil + case uint16: + return strconv.FormatUint(uint64(k), 10), nil + case uint32: + return strconv.FormatUint(uint64(k), 10), nil + case uint64: + return strconv.FormatUint(k, 10), nil + case int: + return strconv.Itoa(k), nil + case int8: + return strconv.FormatInt(int64(k), 10), nil + case int16: + return strconv.FormatInt(int64(k), 10), nil + case int32: + return strconv.FormatInt(int64(k), 10), nil + case int64: + return strconv.FormatInt(k, 10), nil + default: + return "", fmt.Errorf("unexpected map key type, got: %T: %w", k, ErrYAML) + } +} + +func transformData(input any) (out any, err error) { + switch in := input.(type) { + case yaml.Node: + return yamlNode(&in) + case *yaml.Node: + return yamlNode(in) + case map[any]any: + o := make(YAMLMapSlice, 0, len(in)) + for ke, va := range in { + var nmi YAMLMapItem + if nmi.Key, err = format(ke); err != nil { + return nil, err + } + + v, ert := transformData(va) + if ert != nil { + return nil, ert + } + nmi.Value = v + o = append(o, nmi) + } + return o, nil + case []any: + len1 := len(in) + o := make([]any, len1) + for i := range len1 { + o[i], err = transformData(in[i]) + if err != nil { + return nil, err + } + } + return o, nil + } + return input, nil +} diff --git a/vendor/github.com/go-openapi/swag/yamlutils_iface.go b/vendor/github.com/go-openapi/swag/yamlutils_iface.go new file mode 100644 index 00000000000..57767efc567 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yamlutils_iface.go @@ -0,0 +1,20 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package swag + +import ( + "encoding/json" + + "github.com/go-openapi/swag/yamlutils" +) + +// YAMLToJSON converts YAML unmarshaled data into json compatible data +// +// Deprecated: use [yamlutils.YAMLToJSON] instead. +func YAMLToJSON(data any) (json.RawMessage, error) { return yamlutils.YAMLToJSON(data) } + +// BytesToYAMLDoc converts a byte slice into a YAML document +// +// Deprecated: use [yamlutils.BytesToYAMLDoc] instead. 
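+// +// Migration is a drop-in rename (hypothetical sketch): +// +//	doc, err := swag.BytesToYAMLDoc(data)      // before +//	doc, err := yamlutils.BytesToYAMLDoc(data) // after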
+func BytesToYAMLDoc(data []byte) (any, error) { return yamlutils.BytesToYAMLDoc(data) } diff --git a/vendor/github.com/go-openapi/validate/.editorconfig b/vendor/github.com/go-openapi/validate/.editorconfig new file mode 100644 index 00000000000..3152da69a5d --- /dev/null +++ b/vendor/github.com/go-openapi/validate/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/validate/.gitattributes b/vendor/github.com/go-openapi/validate/.gitattributes new file mode 100644 index 00000000000..49ad52766ab --- /dev/null +++ b/vendor/github.com/go-openapi/validate/.gitattributes @@ -0,0 +1,2 @@ +# gofmt always uses LF, whereas Git uses CRLF on Windows. +*.go text eol=lf diff --git a/vendor/github.com/go-openapi/validate/.gitignore b/vendor/github.com/go-openapi/validate/.gitignore new file mode 100644 index 00000000000..fea8b84eca9 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +*.cov +*.out +playground diff --git a/vendor/github.com/go-openapi/validate/.golangci.yml b/vendor/github.com/go-openapi/validate/.golangci.yml new file mode 100644 index 00000000000..10c513342fc --- /dev/null +++ b/vendor/github.com/go-openapi/validate/.golangci.yml @@ -0,0 +1,76 @@ +version: "2" +linters: + default: all + disable: + - cyclop + - depguard + - errchkjson + - errorlint + - exhaustruct + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - godot + - godox + - gomoddirectives + - gosmopolitan + - inamedparam + - intrange + - ireturn + - lll + - musttag + - nestif + - nlreturn + - nonamedreturns + - noinlineerr + - paralleltest + - recvcheck + - testpackage + - thelper + - tparallel + - unparam + - varnamelen + - whitespace + - wrapcheck + - wsl + - wsl_v5 + settings: + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + gocyclo: + min-complexity: 45 + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. 
+ # Default: 3 + max-same-issues: 0 diff --git a/vendor/github.com/go-openapi/validate/BENCHMARK.md b/vendor/github.com/go-openapi/validate/BENCHMARK.md new file mode 100644 index 00000000000..79cf6a077ba --- /dev/null +++ b/vendor/github.com/go-openapi/validate/BENCHMARK.md @@ -0,0 +1,31 @@ +# Benchmark + +Validating the Kubernetes Swagger API + +## v0.22.6: 60,000,000 allocs +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 8549863982 ns/op 7067424936 B/op 59583275 allocs/op +``` + +## After the refactoring PR: minor but noticeable improvements: 25,000,000 allocs +``` +go test -bench Spec +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 4064535557 ns/op 3379715592 B/op 25320330 allocs/op +``` + +## After the GC pressure reduction PR: 17,000,000 allocs +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 3758414145 ns/op 2593881496 B/op 17111373 allocs/op +``` diff --git a/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..9322b065e37 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community.
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/validate/LICENSE b/vendor/github.com/go-openapi/validate/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/validate/README.md b/vendor/github.com/go-openapi/validate/README.md
new file mode 100644
index 00000000000..73d87ce4f01
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/README.md
@@ -0,0 +1,40 @@
+# Validation helpers [![Build Status](https://github.com/go-openapi/validate/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/validate/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/validate/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/validate)
+
+[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/validate/master/LICENSE)
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/validate.svg)](https://pkg.go.dev/github.com/go-openapi/validate)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/validate)](https://goreportcard.com/report/github.com/go-openapi/validate)
+
+This package provides helpers to validate the Swagger 2.0 specification (aka OpenAPI 2.0).
+
+Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md.
+
+## What's inside?
+
+* A validator for Swagger specifications
+* A validator for JSON schemas (draft 4)
+* Helper functions to validate individual values (used by code generated by [go-swagger](https://github.com/go-swagger/go-swagger)).
+  * Required, RequiredNumber, RequiredString
+  * ReadOnly
+  * UniqueItems, MaxItems, MinItems
+  * Enum, EnumCase
+  * Pattern, MinLength, MaxLength
+  * Minimum, Maximum, MultipleOf
+  * FormatOf
+
+[Documentation](https://pkg.go.dev/github.com/go-openapi/validate)
+
+## Licensing
+
+This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE).
+
+## FAQ
+
+* Does this library support OpenAPI 3?
+
+> No.
+> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0).
+> There is no plan to make it evolve toward supporting OpenAPI 3.x.
+> This [discussion thread](https://github.com/go-openapi/spec/issues/21) tells the full story.
+>
+> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3
diff --git a/vendor/github.com/go-openapi/validate/context.go b/vendor/github.com/go-openapi/validate/context.go
new file mode 100644
index 00000000000..b4587dcd560
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/context.go
@@ -0,0 +1,59 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package validate
+
+import (
+	"context"
+)
+
+// validateCtxKey is the type of context keys used in this package
+type validateCtxKey string
+
+const (
+	operationTypeKey validateCtxKey = "operationTypeKey"
+)
+
+type operationType string
+
+const (
+	request  operationType = "request"
+	response operationType = "response"
+	none     operationType = "none" // not specified in ctx
+)
+
+var operationTypeEnum = []operationType{request, response, none}
+
+// WithOperationRequest returns a new context with operationType request
+// in context value
+func WithOperationRequest(ctx context.Context) context.Context {
+	return withOperation(ctx, request)
+}
+
+// WithOperationResponse returns a new context with operationType response
+// in context value
+func WithOperationResponse(ctx context.Context) context.Context {
+	return withOperation(ctx, response)
+}
+
+func withOperation(ctx context.Context, operation operationType) context.Context {
+	return context.WithValue(ctx, operationTypeKey, operation)
+}
+
+// extractOperationType extracts the operation type from ctx.
+// If not specified, or of unknown value, it returns the none operation type.
+func extractOperationType(ctx context.Context) operationType {
+	v := ctx.Value(operationTypeKey)
+	if v == nil {
+		return none
+	}
+	res, ok := v.(operationType)
+	if !ok {
+		return none
+	}
+	// validate the value is in the operation enum
+	if err := Enum("", "", res, operationTypeEnum); err != nil {
+		return none
+	}
+	return res
+}
diff --git a/vendor/github.com/go-openapi/validate/debug.go b/vendor/github.com/go-openapi/validate/debug.go
new file mode 100644
index 00000000000..79145a4495d
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/debug.go
@@ -0,0 +1,36 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package validate
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+var (
+	// Debug is true when the SWAGGER_DEBUG env var is not empty.
+	// It enables a more verbose logging of validators.
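+	//
+	// An illustrative way to enable this trace when running this package's
+	// tests (hypothetical invocation; any command inheriting the env works):
+	//
+	//	SWAGGER_DEBUG=1 go test ./...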
+	Debug = os.Getenv("SWAGGER_DEBUG") != ""
+	// validateLogger is a debug logger for this package
+	validateLogger *log.Logger
+)
+
+func init() {
+	debugOptions()
+}
+
+func debugOptions() {
+	validateLogger = log.New(os.Stdout, "validate:", log.LstdFlags)
+}
+
+func debugLog(msg string, args ...any) {
+	// A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog()
+	if Debug {
+		_, file1, pos1, _ := runtime.Caller(1)
+		validateLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...))
+	}
+}
diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go
new file mode 100644
index 00000000000..79a431677e4
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/default_validator.go
@@ -0,0 +1,294 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package validate
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/go-openapi/spec"
+)
+
+// defaultValidator validates default values in a spec.
+// According to the Swagger spec, default values MUST validate against their schema.
+type defaultValidator struct {
+	SpecValidator  *SpecValidator
+	visitedSchemas map[string]struct{}
+	schemaOptions  *SchemaValidatorOptions
+}
+
+// Validate validates the default values declared in the swagger spec
+func (d *defaultValidator) Validate() *Result {
+	errs := pools.poolOfResults.BorrowResult() // will redeem when merged
+
+	if d == nil || d.SpecValidator == nil {
+		return errs
+	}
+	d.resetVisited()
+	errs.Merge(d.validateDefaultValueValidAgainstSchema()) // error -
+	return errs
+}
+
+// resetVisited resets the internal state of visited schemas
+func (d *defaultValidator) resetVisited() {
+	if d.visitedSchemas == nil {
+		d.visitedSchemas = make(map[string]struct{})
+
+		return
+	}
+
+	// TODO(go1.21): clear(d.visitedSchemas)
+	for k := range d.visitedSchemas {
+		delete(d.visitedSchemas, k)
+	}
+}
+
+func isVisited(path string, visitedSchemas map[string]struct{}) bool {
+	_, found := visitedSchemas[path]
+	if found {
+		return true
+	}
+
+	// search for overlapping paths
+	var (
+		parent string
+		suffix string
+	)
+	const backtrackFromEnd = 2
+	for i := len(path) - backtrackFromEnd; i >= 0; i-- {
+		r := path[i]
+		if r != '.'
 {
+			continue
+		}
+
+		parent = path[0:i]
+		suffix = path[i+1:]
+
+		if strings.HasSuffix(parent, suffix) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// beingVisited asserts a schema is being visited
+func (d *defaultValidator) beingVisited(path string) {
+	d.visitedSchemas[path] = struct{}{}
+}
+
+// isVisited tells if a path has already been visited
+func (d *defaultValidator) isVisited(path string) bool {
+	return isVisited(path, d.visitedSchemas)
+}
+
+func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
+	// every default value that is specified must validate against the schema for that property
+	// headers, items, parameters, schema
+
+	res := pools.poolOfResults.BorrowResult() // will redeem when merged
+	s := d.SpecValidator
+
+	for method, pathItem := range s.expandedAnalyzer().Operations() {
+		for path, op := range pathItem {
+			// parameters
+			for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) {
+				if param.Default != nil && param.Required {
+					res.AddWarnings(requiredHasDefaultMsg(param.Name, param.In))
+				}
+
+				// reset explored schemas to get depth-first recursive-proof exploration
+				d.resetVisited()
+
+				// Check simple parameters first
+				// default values provided must validate against their inline definition (no explicit schema)
+				if param.Default != nil && param.Schema == nil {
+					// check param default value is valid
+					red := newParamValidator(&param, s.KnownFormats, d.schemaOptions).Validate(param.Default) //#nosec
+					if red.HasErrorsOrWarnings() {
+						res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In))
+						res.Merge(red)
+					} else if red.wantsRedeemOnMerge {
+						pools.poolOfResults.RedeemResult(red)
+					}
+				}
+
+				// Recursively follows Items and Schemas
+				if param.Items != nil {
+					red := d.validateDefaultValueItemsAgainstSchema(param.Name, param.In, &param, param.Items) //#nosec
+					if red.HasErrorsOrWarnings() {
+						res.AddErrors(defaultValueItemsDoesNotValidateMsg(param.Name, param.In))
+						res.Merge(red)
+					} else if red.wantsRedeemOnMerge {
+						pools.poolOfResults.RedeemResult(red)
+					}
+				}
+
+				if param.Schema != nil {
+					// Validate default value against schema
+					red := d.validateDefaultValueSchemaAgainstSchema(param.Name, param.In, param.Schema)
+					if red.HasErrorsOrWarnings() {
+						res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In))
+						res.Merge(red)
+					} else if red.wantsRedeemOnMerge {
+						pools.poolOfResults.RedeemResult(red)
+					}
+				}
+			}
+
+			if op.Responses != nil {
+				if op.Responses.Default != nil {
+					// Same constraint on default Response
+					res.Merge(d.validateDefaultInResponse(op.Responses.Default, jsonDefault, path, 0, op.ID))
+				}
+				// Same constraint on regular Responses
+				if op.Responses.StatusCodeResponses != nil { // Safeguard
+					for code, r := range op.Responses.StatusCodeResponses {
+						res.Merge(d.validateDefaultInResponse(&r, "response", path, code, op.ID)) //#nosec
+					}
+				}
+			} else if op.ID != "" {
+				// Empty op.ID means there is no meaningful operation: no need to report a specific message
+				res.AddErrors(noValidResponseMsg(op.ID))
+			}
+		}
+	}
+	if s.spec.Spec().Definitions != nil { // Safeguard
+		// reset explored schemas to get depth-first recursive-proof exploration
+		d.resetVisited()
+		for nm, sch := range s.spec.Spec().Definitions {
+			res.Merge(d.validateDefaultValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec
+		}
+	}
+	return res
+}
+
+func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, responseType, path string, responseCode int, operationID string)
*Result { + s := d.SpecValidator + + response, res := responseHelp.expandResponseRef(resp, path, s) + if !res.IsValid() { + return res + } + + responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode) + + if response.Headers != nil { // Safeguard + for nm, h := range response.Headers { + // reset explored schemas to get depth-first recursive-proof exploration + d.resetVisited() + + if h.Default != nil { + red := newHeaderValidator(nm, &h, s.KnownFormats, d.schemaOptions).Validate(h.Default) //#nosec + if red.HasErrorsOrWarnings() { + res.AddErrors(defaultValueHeaderDoesNotValidateMsg(operationID, nm, responseName)) + res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + + // Headers have inline definition, like params + if h.Items != nil { + red := d.validateDefaultValueItemsAgainstSchema(nm, "header", &h, h.Items) //#nosec + if red.HasErrorsOrWarnings() { + res.AddErrors(defaultValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName)) + res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + + if _, err := compileRegexp(h.Pattern); err != nil { + res.AddErrors(invalidPatternInHeaderMsg(operationID, nm, responseName, h.Pattern, err)) + } + + // Headers don't have schema + } + } + if response.Schema != nil { + // reset explored schemas to get depth-first recursive-proof exploration + d.resetVisited() + + red := d.validateDefaultValueSchemaAgainstSchema(responseCodeAsStr, "response", response.Schema) + if red.HasErrorsOrWarnings() { + // Additional message to make sure the context of the error is not lost + res.AddErrors(defaultValueInDoesNotValidateMsg(operationID, responseName)) + res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + return res +} + +func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in string, schema *spec.Schema) *Result { + if schema == nil || d.isVisited(path) { + // Avoids recursing if we are already done with that check + return nil + } + d.beingVisited(path) + res := pools.poolOfResults.BorrowResult() + s := d.SpecValidator + + if schema.Default != nil { + res.Merge( + newSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, d.schemaOptions).Validate(schema.Default), + ) + } + if schema.Items != nil { + if schema.Items.Schema != nil { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".items.default", in, schema.Items.Schema)) + } + // Multiple schemas in items + if schema.Items.Schemas != nil { // Safeguard + for i, sch := range schema.Items.Schemas { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.items[%d].default", path, i), in, &sch)) //#nosec + } + } + } + if _, err := compileRegexp(schema.Pattern); err != nil { + res.AddErrors(invalidPatternInMsg(path, in, schema.Pattern)) + } + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + // NOTE: we keep validating values, even though additionalItems is not supported by Swagger 2.0 (and 3.0 as well) + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema)) + } + for propName, prop := range schema.Properties { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec + } + for propName, prop := range schema.PatternProperties { + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec + } + if 
schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
+		res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema))
+	}
+	if schema.AllOf != nil {
+		for i, aoSch := range schema.AllOf {
+			res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.allOf[%d]", path, i), in, &aoSch)) //#nosec
+		}
+	}
+	return res
+}
+
+// TODO: Temporary duplicated code. Need to refactor with examples
+
+func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root any, items *spec.Items) *Result {
+	res := pools.poolOfResults.BorrowResult()
+	s := d.SpecValidator
+	if items != nil {
+		if items.Default != nil {
+			res.Merge(
+				newItemsValidator(path, in, items, root, s.KnownFormats, d.schemaOptions).Validate(0, items.Default),
+			)
+		}
+		if items.Items != nil {
+			res.Merge(d.validateDefaultValueItemsAgainstSchema(path+"[0].default", in, root, items.Items))
+		}
+		if _, err := compileRegexp(items.Pattern); err != nil {
+			res.AddErrors(invalidPatternInMsg(path, in, items.Pattern))
+		}
+	}
+	return res
+}
diff --git a/vendor/github.com/go-openapi/validate/doc.go b/vendor/github.com/go-openapi/validate/doc.go
new file mode 100644
index 00000000000..a99893e1a38
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/doc.go
@@ -0,0 +1,76 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package validate provides methods to validate a swagger specification,
+as well as tools to validate data against their schemas.
+
+This package follows the Swagger 2.0 specification (aka OpenAPI 2.0). Reference
+can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md.
+
+# Validating a specification
+
+Validates a spec document (from JSON or YAML) against the JSON schema for swagger,
+then checks a number of extra rules that can't be expressed in JSON schema.
+
+Entry points:
+  - Spec()
+  - NewSpecValidator()
+  - SpecValidator.Validate()
+
+Reported as errors:
+
+	[x] definition can't declare a property that's already defined by one of its ancestors
+	[x] definition's ancestor can't be a descendant of the same model
+	[x] path uniqueness: each API path must be unique per method, ignoring differences in path parameter names. This validation can be relaxed by disabling StrictPathParamUniqueness.
+	[x] each security reference should contain only unique scopes
+	[x] each security scope in a security definition should be unique
+	[x] parameters in path must be unique
+	[x] each path parameter must correspond to a parameter placeholder and vice versa
+	[x] each referenceable definition must have references
+	[x] each definition property listed in the required array must be defined in the properties of the model
+	[x] each parameter should have a unique `name` and `type` combination
+	[x] each operation should have only 1 parameter of type body
+	[x] each reference must point to a valid object
+	[x] every default value that is specified must validate against the schema for that property
+	[x] items property is required for all schemas/definitions of type `array`
+	[x] path parameters must be declared as required
+	[x] headers must not contain $ref
+	[x] schema and property examples provided must validate against their respective object's schema
+	[x] examples provided must validate against their schema
+
+Reported as warnings:
+
+	[x] path parameters should not contain any of [{,},\w]
+	[x] empty path
+	[x] unused definitions
+	[x] unsupported validation of examples on non-JSON media types
+	[x] examples in response without schema
+	[x] readOnly properties should not be required
+
+# Validating a schema
+
+The schema validation toolkit validates data against JSON-schema-draft 04 schemas.
+
+It is tested against the full json-schema-testing-suite (https://github.com/json-schema-org/JSON-Schema-Test-Suite),
+except for the optional part (bignum, ECMA regexp, ...).
+
+It supports the complete JSON-schema vocabulary, including keywords not supported by Swagger (e.g. additionalItems, ...).
+
+Entry points:
+  - AgainstSchema()
+  - ...
+
+# Known limitations
+
+With the current version of this package, the following aspects of swagger are not yet supported:
+
+	[ ] errors and warnings are not reported with key/line number in spec
+	[ ] default values and examples on responses only support the application/json producer type
+	[ ] invalid numeric constraints (such as Minimum, etc.) are not checked, except for default and example values
+	[ ] rules for collectionFormat are not implemented
+	[ ] no validation rule for polymorphism support (discriminator) [not done here]
+	[ ] valid JS ECMA regexps not supported by the Go regexp engine are considered invalid
+	[ ] arbitrarily large numbers are not supported: max is math.MaxFloat64
+*/
+package validate
diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go
new file mode 100644
index 00000000000..e4ef52c6dc1
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/example_validator.go
@@ -0,0 +1,288 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package validate
+
+import (
+	"fmt"
+
+	"github.com/go-openapi/spec"
+)
+
+// exampleValidator validates example values defined in a spec
+type exampleValidator struct {
+	SpecValidator  *SpecValidator
+	visitedSchemas map[string]struct{}
+	schemaOptions  *SchemaValidatorOptions
+}
+
+// Validate validates the example values declared in the swagger spec.
+// Example values MUST conform to their schema.
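+//
+// For instance, a (hypothetical) schema fragment such as
+//
+//	{"type": "integer", "minimum": 0, "example": -1}
+//
+// is flagged, since -1 does not validate against the enclosing schema.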
+//
+// With Swagger 2.0, examples are supported in:
+//   - schemas
+//   - individual properties
+//   - responses
+func (ex *exampleValidator) Validate() *Result {
+	errs := pools.poolOfResults.BorrowResult()
+
+	if ex == nil || ex.SpecValidator == nil {
+		return errs
+	}
+	ex.resetVisited()
+	errs.Merge(ex.validateExampleValueValidAgainstSchema()) // error -
+
+	return errs
+}
+
+// resetVisited resets the internal state of visited schemas
+func (ex *exampleValidator) resetVisited() {
+	if ex.visitedSchemas == nil {
+		ex.visitedSchemas = make(map[string]struct{})
+
+		return
+	}
+
+	// TODO(go1.21): clear(ex.visitedSchemas)
+	for k := range ex.visitedSchemas {
+		delete(ex.visitedSchemas, k)
+	}
+}
+
+// beingVisited asserts a schema is being visited
+func (ex *exampleValidator) beingVisited(path string) {
+	ex.visitedSchemas[path] = struct{}{}
+}
+
+// isVisited tells if a path has already been visited
+func (ex *exampleValidator) isVisited(path string) bool {
+	return isVisited(path, ex.visitedSchemas)
+}
+
+func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
+	// every example value that is specified must validate against the schema for that property
+	// in: schemas, properties, object, items
+	// not in: headers, parameters without schema
+
+	res := pools.poolOfResults.BorrowResult()
+	s := ex.SpecValidator
+
+	for method, pathItem := range s.expandedAnalyzer().Operations() {
+		for path, op := range pathItem {
+			// parameters
+			for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) {
+
+				// As of swagger 2.0, Examples are not supported in simple parameters
+				// However, it looks like they are supported by go-openapi
+
+				// reset explored schemas to get depth-first recursive-proof exploration
+				ex.resetVisited()
+
+				// Check simple parameters first
+				// example values provided must validate against their inline definition (no explicit schema)
+				if param.Example != nil && param.Schema == nil {
+					// check param example value is valid
+					red := newParamValidator(&param, s.KnownFormats, ex.schemaOptions).Validate(param.Example) //#nosec
+					if red.HasErrorsOrWarnings() {
+						res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In))
+						res.MergeAsWarnings(red)
+					} else if red.wantsRedeemOnMerge {
+						pools.poolOfResults.RedeemResult(red)
+					}
+				}
+
+				// Recursively follows Items and Schemas
+				if param.Items != nil {
+					red := ex.validateExampleValueItemsAgainstSchema(param.Name, param.In, &param, param.Items) //#nosec
+					if red.HasErrorsOrWarnings() {
+						res.AddWarnings(exampleValueItemsDoesNotValidateMsg(param.Name, param.In))
+						res.Merge(red)
+					} else if red.wantsRedeemOnMerge {
+						pools.poolOfResults.RedeemResult(red)
+					}
+				}
+
+				if param.Schema != nil {
+					// Validate example value against schema
+					red := ex.validateExampleValueSchemaAgainstSchema(param.Name, param.In, param.Schema)
+					if red.HasErrorsOrWarnings() {
+						res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In))
+						res.Merge(red)
+					} else if red.wantsRedeemOnMerge {
+						pools.poolOfResults.RedeemResult(red)
+					}
+				}
+			}
+
+			if op.Responses != nil {
+				if op.Responses.Default != nil {
+					// Same constraint on default Response
+					res.Merge(ex.validateExampleInResponse(op.Responses.Default, jsonDefault, path, 0, op.ID))
+				}
+				// Same constraint on regular Responses
+				if op.Responses.StatusCodeResponses != nil { // Safeguard
+					for code, r := range op.Responses.StatusCodeResponses {
+						res.Merge(ex.validateExampleInResponse(&r, "response", path, code, op.ID)) //#nosec
+					}
+				}
+			} else if
op.ID != "" { + // Empty op.ID means there is no meaningful operation: no need to report a specific message + res.AddErrors(noValidResponseMsg(op.ID)) + } + } + } + if s.spec.Spec().Definitions != nil { // Safeguard + // reset explored schemas to get depth-first recursive-proof exploration + ex.resetVisited() + for nm, sch := range s.spec.Spec().Definitions { + res.Merge(ex.validateExampleValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec + } + } + return res +} + +func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, responseType, path string, responseCode int, operationID string) *Result { + s := ex.SpecValidator + + response, res := responseHelp.expandResponseRef(resp, path, s) + if !res.IsValid() { // Safeguard + return res + } + + responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode) + + if response.Headers != nil { // Safeguard + for nm, h := range response.Headers { + // reset explored schemas to get depth-first recursive-proof exploration + ex.resetVisited() + + if h.Example != nil { + red := newHeaderValidator(nm, &h, s.KnownFormats, ex.schemaOptions).Validate(h.Example) //#nosec + if red.HasErrorsOrWarnings() { + res.AddWarnings(exampleValueHeaderDoesNotValidateMsg(operationID, nm, responseName)) + res.MergeAsWarnings(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + + // Headers have inline definition, like params + if h.Items != nil { + red := ex.validateExampleValueItemsAgainstSchema(nm, "header", &h, h.Items) //#nosec + if red.HasErrorsOrWarnings() { + res.AddWarnings(exampleValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName)) + res.MergeAsWarnings(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + + if _, err := compileRegexp(h.Pattern); err != nil { + res.AddErrors(invalidPatternInHeaderMsg(operationID, nm, responseName, h.Pattern, err)) + } + + // Headers don't have schema + } + } + if response.Schema != nil { + // reset explored schemas to get depth-first recursive-proof exploration + ex.resetVisited() + + red := ex.validateExampleValueSchemaAgainstSchema(responseCodeAsStr, "response", response.Schema) + if red.HasErrorsOrWarnings() { + // Additional message to make sure the context of the error is not lost + res.AddWarnings(exampleValueInDoesNotValidateMsg(operationID, responseName)) + res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) + } + } + + if response.Examples != nil { + if response.Schema != nil { + if example, ok := response.Examples["application/json"]; ok { + res.MergeAsWarnings( + newSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, s.schemaOptions).Validate(example), + ) + } else { + // TODO: validate other media types too + res.AddWarnings(examplesMimeNotSupportedMsg(operationID, responseName)) + } + } else { + res.AddWarnings(examplesWithoutSchemaMsg(operationID, responseName)) + } + } + return res +} + +func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in string, schema *spec.Schema) *Result { + if schema == nil || ex.isVisited(path) { + // Avoids recursing if we are already done with that check + return nil + } + ex.beingVisited(path) + s := ex.SpecValidator + res := pools.poolOfResults.BorrowResult() + + if schema.Example != nil { + res.MergeAsWarnings( + newSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, ex.schemaOptions).Validate(schema.Example), + ) + } + 
if schema.Items != nil { + if schema.Items.Schema != nil { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".items.example", in, schema.Items.Schema)) + } + // Multiple schemas in items + if schema.Items.Schemas != nil { // Safeguard + for i, sch := range schema.Items.Schemas { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.items[%d].example", path, i), in, &sch)) //#nosec + } + } + } + if _, err := compileRegexp(schema.Pattern); err != nil { + res.AddErrors(invalidPatternInMsg(path, in, schema.Pattern)) + } + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + // NOTE: we keep validating values, even though additionalItems is unsupported in Swagger 2.0 (and 3.0 as well) + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema)) + } + for propName, prop := range schema.Properties { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec + } + for propName, prop := range schema.PatternProperties { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec + } + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema)) + } + if schema.AllOf != nil { + for i, aoSch := range schema.AllOf { + res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.allOf[%d]", path, i), in, &aoSch)) //#nosec + } + } + return res +} + +// TODO: Temporary duplicated code. Need to refactor with examples +// + +func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in string, root any, items *spec.Items) *Result { + res := pools.poolOfResults.BorrowResult() + s := ex.SpecValidator + if items != nil { + if items.Example != nil { + res.MergeAsWarnings( + newItemsValidator(path, in, items, root, s.KnownFormats, ex.schemaOptions).Validate(0, items.Example), + ) + } + if items.Items != nil { + res.Merge(ex.validateExampleValueItemsAgainstSchema(path+"[0].example", in, root, items.Items)) + } + if _, err := compileRegexp(items.Pattern); err != nil { + res.AddErrors(invalidPatternInMsg(path, in, items.Pattern)) + } + } + + return res +} diff --git a/vendor/github.com/go-openapi/validate/formats.go b/vendor/github.com/go-openapi/validate/formats.go new file mode 100644 index 00000000000..85ee6349418 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/formats.go @@ -0,0 +1,88 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "reflect" + + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +type formatValidator struct { + Path string + In string + Format string + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func newFormatValidator(path, in, format string, formats strfmt.Registry, opts *SchemaValidatorOptions) *formatValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var f *formatValidator + if opts.recycleValidators { + f = pools.poolOfFormatValidators.BorrowValidator() + } else { + f = new(formatValidator) + } + + f.Path = path + f.In = in + f.Format = format + f.KnownFormats = formats + f.Options = opts + + return f +} + +func (f *formatValidator) SetPath(path string) { + f.Path = path +} + +func (f *formatValidator) Applies(source any, kind reflect.Kind) bool { + if source == nil || 
f.KnownFormats == nil { + return false + } + + switch source := source.(type) { + case *spec.Items: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Parameter: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Schema: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Header: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + default: + return false + } +} + +func (f *formatValidator) Validate(val any) *Result { + if f.Options.recycleValidators { + defer func() { + f.redeem() + }() + } + + var result *Result + if f.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + + if err := FormatOf(f.Path, f.In, f.Format, val.(string), f.KnownFormats); err != nil { + result.AddErrors(err) + } + + return result +} + +func (f *formatValidator) redeem() { + pools.poolOfFormatValidators.RedeemValidator(f) +} diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go new file mode 100644 index 00000000000..49b130473a9 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/helpers.go @@ -0,0 +1,322 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +// TODO: define this as package validate/internal +// This must be done while keeping CI intact with all tests and test coverage + +import ( + "reflect" + "strconv" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" +) + +const ( + swaggerBody = "body" + swaggerExample = "example" + swaggerExamples = "examples" +) + +const ( + objectType = "object" + arrayType = "array" + stringType = "string" + integerType = "integer" + numberType = "number" + booleanType = "boolean" + fileType = "file" + nullType = "null" +) + +const ( + jsonProperties = "properties" + jsonItems = "items" + jsonType = "type" + // jsonSchema = "schema" + jsonDefault = "default" +) + +const ( + stringFormatDate = "date" + stringFormatDateTime = "date-time" + stringFormatPassword = "password" + stringFormatByte = "byte" + // stringFormatBinary = "binary" + stringFormatCreditCard = "creditcard" + stringFormatDuration = "duration" + stringFormatEmail = "email" + stringFormatHexColor = "hexcolor" + stringFormatHostname = "hostname" + stringFormatIPv4 = "ipv4" + stringFormatIPv6 = "ipv6" + stringFormatISBN = "isbn" + stringFormatISBN10 = "isbn10" + stringFormatISBN13 = "isbn13" + stringFormatMAC = "mac" + stringFormatBSONObjectID = "bsonobjectid" + stringFormatRGBColor = "rgbcolor" + stringFormatSSN = "ssn" + stringFormatURI = "uri" + stringFormatUUID = "uuid" + stringFormatUUID3 = "uuid3" + stringFormatUUID4 = "uuid4" + stringFormatUUID5 = "uuid5" + + integerFormatInt32 = "int32" + integerFormatInt64 = "int64" + integerFormatUInt32 = "uint32" + integerFormatUInt64 = "uint64" + + numberFormatFloat32 = "float32" + numberFormatFloat64 = "float64" + numberFormatFloat = "float" + numberFormatDouble = "double" +) + +// Helpers available at the package level +var ( + pathHelp *pathHelper + valueHelp *valueHelper + errorHelp *errorHelper + paramHelp *paramHelper + responseHelp *responseHelper +) + +type errorHelper struct { + // A collection of unexported helpers for error construction +} + +func (h *errorHelper) sErr(err errors.Error, recycle bool) *Result { + // Builds a Result from standard errors.Error + var result *Result + if 
recycle {
+		result = pools.poolOfResults.BorrowResult()
+	} else {
+		result = new(Result)
+	}
+	result.Errors = []error{err}
+
+	return result
+}
+
+func (h *errorHelper) addPointerError(res *Result, err error, ref string, fromPath string) *Result {
+	// Provides more context on error messages
+	// reported by the jsonpointer package by altering the passed Result
+	if err != nil {
+		res.AddErrors(cannotResolveRefMsg(fromPath, ref, err))
+	}
+	return res
+}
+
+type pathHelper struct {
+	// A collection of unexported helpers for path validation
+}
+
+func (h *pathHelper) stripParametersInPath(path string) string {
+	// Returns a path stripped of all path parameters, with multiple or trailing slashes removed.
+	//
+	// Stripping is performed on a slash-separated basis, e.g. '/a{/b}' remains a{/b} and not /a.
+	// - A trailing "/" makes a difference, e.g. /a/ !~ /a (ex: canary/bitbucket.org/swagger.json)
+	// - presence or absence of a parameter makes a difference, e.g. /a/{log} !~ /a/ (ex: canary/kubernetes/swagger.json)
+
+	// Regexp to extract parameters from path, with surrounding {}.
+	// NOTE: important non-greedy modifier
+	rexParsePathParam := mustCompileRegexp(`{[^{}]+?}`)
+	strippedSegments := []string{}
+
+	for segment := range strings.SplitSeq(path, "/") {
+		strippedSegments = append(strippedSegments, rexParsePathParam.ReplaceAllString(segment, "X"))
+	}
+	return strings.Join(strippedSegments, "/")
+}
+
+func (h *pathHelper) extractPathParams(path string) (params []string) {
+	// Extracts all params from a path, with surrounding "{}"
+	rexParsePathParam := mustCompileRegexp(`{[^{}]+?}`)
+
+	for segment := range strings.SplitSeq(path, "/") {
+		for _, v := range rexParsePathParam.FindAllStringSubmatch(segment, -1) {
+			params = append(params, v...)
+		}
+	}
+	return
+}
+
+type valueHelper struct {
+	// A collection of unexported helpers for value validation
+}
+
+func (h *valueHelper) asInt64(val any) int64 {
+	// Number conversion function for int64, without error checking
+	// (implements an implicit type upgrade).
+	v := reflect.ValueOf(val)
+	switch v.Kind() { //nolint:exhaustive
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return int64(v.Uint()) //nolint:gosec
+	case reflect.Float32, reflect.Float64:
+		return int64(v.Float())
+	default:
+		// panic("Non numeric value in asInt64()")
+		return 0
+	}
+}
+
+func (h *valueHelper) asUint64(val any) uint64 {
+	// Number conversion function for uint64, without error checking
+	// (implements an implicit type upgrade).
+	v := reflect.ValueOf(val)
+	switch v.Kind() { //nolint:exhaustive
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return uint64(v.Int()) //nolint:gosec
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return v.Uint()
+	case reflect.Float32, reflect.Float64:
+		return uint64(v.Float())
+	default:
+		// panic("Non numeric value in asUint64()")
+		return 0
+	}
+}
+
+// Same conversion helper, for float64 values
+func (h *valueHelper) asFloat64(val any) float64 {
+	// Number conversion function for float64, without error checking
+	// (implements an implicit type upgrade).
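+	//
+	// For example: asFloat64(int64(3)) and asFloat64(uint8(3)) both yield
+	// float64(3); non-numeric input yields 0.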
+ v := reflect.ValueOf(val) + switch v.Kind() { //nolint:exhaustive + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return float64(v.Uint()) + case reflect.Float32, reflect.Float64: + return v.Float() + default: + // panic("Non numeric value in asFloat64()") + return 0 + } +} + +type paramHelper struct { + // A collection of unexported helpers for parameters resolution +} + +func (h *paramHelper) safeExpandedParamsFor(path, method, operationID string, res *Result, s *SpecValidator) (params []spec.Parameter) { + operation, ok := s.expandedAnalyzer().OperationFor(method, path) + if ok { + // expand parameters first if necessary + resolvedParams := []spec.Parameter{} + for _, ppr := range operation.Parameters { + resolvedParam, red := h.resolveParam(path, method, operationID, &ppr, s) //#nosec + res.Merge(red) + if resolvedParam != nil { + resolvedParams = append(resolvedParams, *resolvedParam) + } + } + // remove params with invalid expansion from Slice + operation.Parameters = resolvedParams + + for _, ppr := range s.expandedAnalyzer().SafeParamsFor(method, path, + func(_ spec.Parameter, err error) bool { + // since params have already been expanded, there are few causes for error + res.AddErrors(someParametersBrokenMsg(path, method, operationID)) + // original error from analyzer + res.AddErrors(err) + return true + }) { + params = append(params, ppr) + } + } + return +} + +func (h *paramHelper) resolveParam(path, method, operationID string, param *spec.Parameter, s *SpecValidator) (*spec.Parameter, *Result) { + // Ensure parameter is expanded + var err error + res := new(Result) + isRef := param.Ref.String() != "" + if s.spec.SpecFilePath() == "" { + err = spec.ExpandParameterWithRoot(param, s.spec.Spec(), nil) + } else { + err = spec.ExpandParameter(param, s.spec.SpecFilePath()) + + } + if err != nil { // Safeguard + // NOTE: we may enter here when the whole parameter is an unresolved $ref + refPath := strings.Join([]string{"\"" + path + "\"", method}, ".") + errorHelp.addPointerError(res, err, param.Ref.String(), refPath) + return nil, res + } + res.Merge(h.checkExpandedParam(param, param.Name, param.In, operationID, isRef)) + return param, res +} + +func (h *paramHelper) checkExpandedParam(pr *spec.Parameter, path, in, operation string, isRef bool) *Result { + // Secure parameter structure after $ref resolution + res := new(Result) + simpleZero := spec.SimpleSchema{} + // Try to explain why... best guess + switch { + case pr.In == swaggerBody && (pr.SimpleSchema != simpleZero && pr.Type != objectType): + if isRef { + // Most likely, a $ref with a sibling is an unwanted situation: in itself this is a warning... 
+ // but we detect it because of the following error: + // schema took over Parameter for an unexplained reason + res.AddWarnings(refShouldNotHaveSiblingsMsg(path, operation)) + } + res.AddErrors(invalidParameterDefinitionMsg(path, in, operation)) + case pr.In != swaggerBody && pr.Schema != nil: + if isRef { + res.AddWarnings(refShouldNotHaveSiblingsMsg(path, operation)) + } + res.AddErrors(invalidParameterDefinitionAsSchemaMsg(path, in, operation)) + case (pr.In == swaggerBody && pr.Schema == nil) || (pr.In != swaggerBody && pr.SimpleSchema == simpleZero): + // Other unexpected mishaps + res.AddErrors(invalidParameterDefinitionMsg(path, in, operation)) + } + return res +} + +type responseHelper struct { + // A collection of unexported helpers for response resolution +} + +func (r *responseHelper) expandResponseRef( + response *spec.Response, + path string, s *SpecValidator) (*spec.Response, *Result) { + // Ensure response is expanded + var err error + res := new(Result) + if s.spec.SpecFilePath() == "" { + // there is no physical document to resolve $ref in response + err = spec.ExpandResponseWithRoot(response, s.spec.Spec(), nil) + } else { + err = spec.ExpandResponse(response, s.spec.SpecFilePath()) + } + if err != nil { // Safeguard + // NOTE: we may enter here when the whole response is an unresolved $ref. + errorHelp.addPointerError(res, err, response.Ref.String(), path) + return nil, res + } + + return response, res +} + +func (r *responseHelper) responseMsgVariants( + responseType string, + responseCode int) (responseName, responseCodeAsStr string) { + // Path variants for messages + if responseType == jsonDefault { + responseCodeAsStr = jsonDefault + responseName = "default response" + } else { + responseCodeAsStr = strconv.Itoa(responseCode) + responseName = "response " + responseCodeAsStr + } + return +} diff --git a/vendor/github.com/go-openapi/validate/object_validator.go b/vendor/github.com/go-openapi/validate/object_validator.go new file mode 100644 index 00000000000..cf98ed377d5 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/object_validator.go @@ -0,0 +1,420 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "fmt" + "reflect" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +type objectValidator struct { + Path string + In string + MaxProperties *int64 + MinProperties *int64 + Required []string + Properties map[string]spec.Schema + AdditionalProperties *spec.SchemaOrBool + PatternProperties map[string]spec.Schema + Root any + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions + splitPath []string +} + +func newObjectValidator(path, in string, + maxProperties, minProperties *int64, required []string, properties spec.SchemaProperties, + additionalProperties *spec.SchemaOrBool, patternProperties spec.SchemaProperties, + root any, formats strfmt.Registry, opts *SchemaValidatorOptions) *objectValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var v *objectValidator + if opts.recycleValidators { + v = pools.poolOfObjectValidators.BorrowValidator() + } else { + v = new(objectValidator) + } + + v.Path = path + v.In = in + v.MaxProperties = maxProperties + v.MinProperties = minProperties + v.Required = required + v.Properties = properties + v.AdditionalProperties = additionalProperties + v.PatternProperties = patternProperties + v.Root = root + v.KnownFormats = formats + 
v.Options = opts + v.splitPath = strings.Split(v.Path, ".") + + return v +} + +func (o *objectValidator) Validate(data any) *Result { + if o.Options.recycleValidators { + defer func() { + o.redeem() + }() + } + + var val map[string]any + if data != nil { + var ok bool + val, ok = data.(map[string]any) + if !ok { + return errorHelp.sErr(invalidObjectMsg(o.Path, o.In), o.Options.recycleResult) + } + } + numKeys := int64(len(val)) + + if o.MinProperties != nil && numKeys < *o.MinProperties { + return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties), o.Options.recycleResult) + } + if o.MaxProperties != nil && numKeys > *o.MaxProperties { + return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties), o.Options.recycleResult) + } + + var res *Result + if o.Options.recycleResult { + res = pools.poolOfResults.BorrowResult() + } else { + res = new(Result) + } + + o.precheck(res, val) + + // check validity of field names + if o.AdditionalProperties != nil && !o.AdditionalProperties.Allows { + // Case: additionalProperties: false + o.validateNoAdditionalProperties(val, res) + } else { + // Cases: empty additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <> } + o.validateAdditionalProperties(val, res) + } + + o.validatePropertiesSchema(val, res) + + // Check patternProperties + // TODO: it looks like we have done that twice in many cases + for key, value := range val { + _, regularProperty := o.Properties[key] + matched, _, patterns := o.validatePatternProperty(key, value, res) // applies to regular properties as well + if regularProperty || !matched { + continue + } + + for _, pName := range patterns { + if v, ok := o.PatternProperties[pName]; ok { + r := newSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value) + res.mergeForField(data.(map[string]any), key, r) + } + } + } + + return res +} + +func (o *objectValidator) SetPath(path string) { + o.Path = path + o.splitPath = strings.Split(path, ".") +} + +func (o *objectValidator) Applies(source any, kind reflect.Kind) bool { + // TODO: this should also work for structs + // there is a problem in the type validator where it will be unhappy about null values + // so that requires more testing + _, isSchema := source.(*spec.Schema) + return isSchema && (kind == reflect.Map || kind == reflect.Struct) +} + +func (o *objectValidator) isProperties() bool { + p := o.splitPath + return len(p) > 1 && p[len(p)-1] == jsonProperties && p[len(p)-2] != jsonProperties +} + +func (o *objectValidator) isDefault() bool { + p := o.splitPath + return len(p) > 1 && p[len(p)-1] == jsonDefault && p[len(p)-2] != jsonDefault +} + +func (o *objectValidator) isExample() bool { + p := o.splitPath + return len(p) > 1 && (p[len(p)-1] == swaggerExample || p[len(p)-1] == swaggerExamples) && p[len(p)-2] != swaggerExample +} + +func (o *objectValidator) checkArrayMustHaveItems(res *Result, val map[string]any) { + // for swagger 2.0 schemas, there is an additional constraint to have array items defined explicitly. + // with pure jsonschema draft 4, one may have arrays with undefined items (i.e. any type). 
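+	//
+	// For instance, a (hypothetical) schema fragment {"type": "array"} with
+	// no "items" key is reported below with a Required error on "items".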
+ if val == nil { + return + } + + t, typeFound := val[jsonType] + if !typeFound { + return + } + + tpe, isString := t.(string) + if !isString || tpe != arrayType { + return + } + + item, itemsKeyFound := val[jsonItems] + if itemsKeyFound { + return + } + + res.AddErrors(errors.Required(jsonItems, o.Path, item)) +} + +func (o *objectValidator) checkItemsMustBeTypeArray(res *Result, val map[string]any) { + if val == nil { + return + } + + if o.isProperties() || o.isDefault() || o.isExample() { + return + } + + _, itemsKeyFound := val[jsonItems] + if !itemsKeyFound { + return + } + + t, typeFound := val[jsonType] + if !typeFound { + // there is no type + res.AddErrors(errors.Required(jsonType, o.Path, t)) + } + + if tpe, isString := t.(string); !isString || tpe != arrayType { + res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil)) + } +} + +func (o *objectValidator) precheck(res *Result, val map[string]any) { + if o.Options.EnableArrayMustHaveItemsCheck { + o.checkArrayMustHaveItems(res, val) + } + if o.Options.EnableObjectArrayTypeCheck { + o.checkItemsMustBeTypeArray(res, val) + } +} + +func (o *objectValidator) validateNoAdditionalProperties(val map[string]any, res *Result) { + for k := range val { + if k == "$schema" || k == "id" { + // special properties "$schema" and "id" are ignored + continue + } + + _, regularProperty := o.Properties[k] + if regularProperty { + continue + } + + matched := false + for pk := range o.PatternProperties { + re, err := compileRegexp(pk) + if err != nil { + continue + } + if matches := re.MatchString(k); matches { + matched = true + break + } + } + if matched { + continue + } + + res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k)) + + // BUG(fredbi): This section should move to a part dedicated to spec validation as + // it will conflict with regular schemas where a property "headers" is defined. + + // + // Croaks a more explicit message on top of the standard one + // on some recognized cases. + // + // NOTE: edge cases with invalid type assertion are simply ignored here. + // NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered + // by higher level callers (the IMPORTANT! tag will be eventually + // removed). + if k != "headers" || val[k] == nil { + continue + } + + // $ref is forbidden in header + headers, mapOk := val[k].(map[string]any) + if !mapOk { + continue + } + + for headerKey, headerBody := range headers { + if headerBody == nil { + continue + } + + headerSchema, mapOfMapOk := headerBody.(map[string]any) + if !mapOfMapOk { + continue + } + + _, found := headerSchema["$ref"] + if !found { + continue + } + + refString, stringOk := headerSchema["$ref"].(string) + if !stringOk { + continue + } + + msg := strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "") + res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg)) + /* + case "$ref": + if val[k] != nil { + // TODO: check context of that ref: warn about siblings, check against invalid context + } + */ + } + } +} + +func (o *objectValidator) validateAdditionalProperties(val map[string]any, res *Result) { + for key, value := range val { + _, regularProperty := o.Properties[key] + if regularProperty { + continue + } + + // Validates property against "patternProperties" if applicable + // BUG(fredbi): succeededOnce is always false + + // NOTE: how about regular properties which do not match patternProperties? 
+ matched, succeededOnce, _ := o.validatePatternProperty(key, value, res) + if matched || succeededOnce { + continue + } + + if o.AdditionalProperties == nil || o.AdditionalProperties.Schema == nil { + continue + } + + // Cases: properties which are not regular properties and have not been matched by the PatternProperties validator + // AdditionalProperties as Schema + r := newSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value) + res.mergeForField(val, key, r) + } + // Valid cases: additionalProperties: true or undefined +} + +func (o *objectValidator) validatePropertiesSchema(val map[string]any, res *Result) { + createdFromDefaults := map[string]struct{}{} + + // Property types: + // - regular Property + pSchema := pools.poolOfSchemas.BorrowSchema() // recycle a spec.Schema object which lifespan extends only to the validation of properties + defer func() { + pools.poolOfSchemas.RedeemSchema(pSchema) + }() + + for pName := range o.Properties { + *pSchema = o.Properties[pName] + var rName string + if o.Path == "" { + rName = pName + } else { + rName = o.Path + "." + pName + } + + // Recursively validates each property against its schema + v, ok := val[pName] + if ok { + r := newSchemaValidator(pSchema, o.Root, rName, o.KnownFormats, o.Options).Validate(v) + res.mergeForField(val, pName, r) + + continue + } + + if pSchema.Default != nil { + // if a default value is defined, creates the property from defaults + // NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does. + createdFromDefaults[pName] = struct{}{} + if !o.Options.skipSchemataResult { + res.addPropertySchemata(val, pName, pSchema) // this shallow-clones the content of the pSchema pointer + } + } + } + + if len(o.Required) == 0 { + return + } + + // Check required properties + for _, k := range o.Required { + v, ok := val[k] + if ok { + continue + } + _, isCreatedFromDefaults := createdFromDefaults[k] + if isCreatedFromDefaults { + continue + } + + res.AddErrors(errors.Required(fmt.Sprintf("%s.%s", o.Path, k), o.In, v)) + } +} + +// TODO: succeededOnce is not used anywhere +func (o *objectValidator) validatePatternProperty(key string, value any, result *Result) (bool, bool, []string) { + if len(o.PatternProperties) == 0 { + return false, false, nil + } + + matched := false + succeededOnce := false + patterns := make([]string, 0, len(o.PatternProperties)) + + schema := pools.poolOfSchemas.BorrowSchema() + defer func() { + pools.poolOfSchemas.RedeemSchema(schema) + }() + + for k := range o.PatternProperties { + re, err := compileRegexp(k) + if err != nil { + continue + } + + match := re.MatchString(key) + if !match { + continue + } + + *schema = o.PatternProperties[k] + patterns = append(patterns, k) + matched = true + validator := newSchemaValidator(schema, o.Root, fmt.Sprintf("%s.%s", o.Path, key), o.KnownFormats, o.Options) + + res := validator.Validate(value) + result.Merge(res) + } + + return matched, succeededOnce, patterns +} + +func (o *objectValidator) redeem() { + pools.poolOfObjectValidators.RedeemValidator(o) +} diff --git a/vendor/github.com/go-openapi/validate/options.go b/vendor/github.com/go-openapi/validate/options.go new file mode 100644 index 00000000000..f5e7f7131c7 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/options.go @@ -0,0 +1,51 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import "sync" + +// Opts specifies 
validation options for a SpecValidator. +// +// NOTE: other options might be needed, for example a go-swagger specific mode. +type Opts struct { + ContinueOnErrors bool // true: continue reporting errors, even if spec is invalid + + // StrictPathParamUniqueness enables strict validation of paths that include + // path parameters. When true, it will enforce that for each method, the path + // is unique, regardless of path parameters such that GET:/petstore/{id} and + // GET:/petstore/{pet} are considered duplicate paths. + // + // Consider disabling if path parameters can include slashes such as + // GET:/v1/{shelve} and GET:/v1/{book}, where the IDs are "shelve/*" and + // "shelve/*/book/*" respectively. + StrictPathParamUniqueness bool + SkipSchemataResult bool +} + +var ( + defaultOpts = Opts{ + // default is to stop validation on errors + ContinueOnErrors: false, + + // StrictPathParamUniqueness is defaulted to true. This maintains existing + // behavior. + StrictPathParamUniqueness: true, + } + + defaultOptsMutex = &sync.Mutex{} +) + +// SetContinueOnErrors sets the global default behavior regarding spec validation error reporting. +// +// For extended error reporting, you most likely want to set it to true. +// For faster validation, it's better to give up early when a spec is detected as invalid: set it to false (this is the default). +// +// Setting this mode does NOT affect the validation status. +// +// NOTE: this method affects global defaults. It is not suitable for concurrent usage. +func SetContinueOnErrors(c bool) { + defaultOptsMutex.Lock() + defer defaultOptsMutex.Unlock() + defaultOpts.ContinueOnErrors = c +} diff --git a/vendor/github.com/go-openapi/validate/pools.go b/vendor/github.com/go-openapi/validate/pools.go new file mode 100644 index 00000000000..1e734be493b --- /dev/null +++ b/vendor/github.com/go-openapi/validate/pools.go @@ -0,0 +1,369 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +//go:build !validatedebug + +package validate + +import ( + "sync" + + "github.com/go-openapi/spec" +) + +var pools allPools + +func init() { + resetPools() +} + +func resetPools() { + // NOTE: for testing purposes, we might want to reset pools after calling Validate twice. + // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool + // and further calls to Get are mishandled.
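A standalone illustration of the corruption this note warns about: redeeming the same pointer twice lets two later borrows alias the same object (the exact outcome depends on the runtime, hence "may"). This sketch is not part of the vendored code:

package main

import (
	"fmt"
	"sync"
)

func main() {
	p := sync.Pool{New: func() any { return new(int) }}
	x := p.Get().(*int)
	p.Put(x)
	p.Put(x) // double Put: the pool now holds the same pointer twice

	a := p.Get().(*int)
	b := p.Get().(*int)
	fmt.Println(a == b) // may print true: two "independent" borrows alias
}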
+ + pools = allPools{ + poolOfSchemaValidators: schemaValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &SchemaValidator{} + + return s + }, + }, + }, + poolOfObjectValidators: objectValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &objectValidator{} + + return s + }, + }, + }, + poolOfSliceValidators: sliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaSliceValidator{} + + return s + }, + }, + }, + poolOfItemsValidators: itemsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &itemsValidator{} + + return s + }, + }, + }, + poolOfBasicCommonValidators: basicCommonValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicCommonValidator{} + + return s + }, + }, + }, + poolOfHeaderValidators: headerValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &HeaderValidator{} + + return s + }, + }, + }, + poolOfParamValidators: paramValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &ParamValidator{} + + return s + }, + }, + }, + poolOfBasicSliceValidators: basicSliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicSliceValidator{} + + return s + }, + }, + }, + poolOfNumberValidators: numberValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &numberValidator{} + + return s + }, + }, + }, + poolOfStringValidators: stringValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &stringValidator{} + + return s + }, + }, + }, + poolOfSchemaPropsValidators: schemaPropsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaPropsValidator{} + + return s + }, + }, + }, + poolOfFormatValidators: formatValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &formatValidator{} + + return s + }, + }, + }, + poolOfTypeValidators: typeValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &typeValidator{} + + return s + }, + }, + }, + poolOfSchemas: schemasPool{ + Pool: &sync.Pool{ + New: func() any { + s := &spec.Schema{} + + return s + }, + }, + }, + poolOfResults: resultsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &Result{} + + return s + }, + }, + }, + } +} + +type ( + allPools struct { + // memory pools for all validator objects. + // + // Each pool can be borrowed from and redeemed to. 
+ poolOfSchemaValidators schemaValidatorsPool + poolOfObjectValidators objectValidatorsPool + poolOfSliceValidators sliceValidatorsPool + poolOfItemsValidators itemsValidatorsPool + poolOfBasicCommonValidators basicCommonValidatorsPool + poolOfHeaderValidators headerValidatorsPool + poolOfParamValidators paramValidatorsPool + poolOfBasicSliceValidators basicSliceValidatorsPool + poolOfNumberValidators numberValidatorsPool + poolOfStringValidators stringValidatorsPool + poolOfSchemaPropsValidators schemaPropsValidatorsPool + poolOfFormatValidators formatValidatorsPool + poolOfTypeValidators typeValidatorsPool + poolOfSchemas schemasPool + poolOfResults resultsPool + } + + schemaValidatorsPool struct { + *sync.Pool + } + + objectValidatorsPool struct { + *sync.Pool + } + + sliceValidatorsPool struct { + *sync.Pool + } + + itemsValidatorsPool struct { + *sync.Pool + } + + basicCommonValidatorsPool struct { + *sync.Pool + } + + headerValidatorsPool struct { + *sync.Pool + } + + paramValidatorsPool struct { + *sync.Pool + } + + basicSliceValidatorsPool struct { + *sync.Pool + } + + numberValidatorsPool struct { + *sync.Pool + } + + stringValidatorsPool struct { + *sync.Pool + } + + schemaPropsValidatorsPool struct { + *sync.Pool + } + + formatValidatorsPool struct { + *sync.Pool + } + + typeValidatorsPool struct { + *sync.Pool + } + + schemasPool struct { + *sync.Pool + } + + resultsPool struct { + *sync.Pool + } +) + +func (p schemaValidatorsPool) BorrowValidator() *SchemaValidator { + return p.Get().(*SchemaValidator) +} + +func (p schemaValidatorsPool) RedeemValidator(s *SchemaValidator) { + // NOTE: s might be nil. In that case, Put is a noop. + p.Put(s) +} + +func (p objectValidatorsPool) BorrowValidator() *objectValidator { + return p.Get().(*objectValidator) +} + +func (p objectValidatorsPool) RedeemValidator(s *objectValidator) { + p.Put(s) +} + +func (p sliceValidatorsPool) BorrowValidator() *schemaSliceValidator { + return p.Get().(*schemaSliceValidator) +} + +func (p sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) { + p.Put(s) +} + +func (p itemsValidatorsPool) BorrowValidator() *itemsValidator { + return p.Get().(*itemsValidator) +} + +func (p itemsValidatorsPool) RedeemValidator(s *itemsValidator) { + p.Put(s) +} + +func (p basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator { + return p.Get().(*basicCommonValidator) +} + +func (p basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) { + p.Put(s) +} + +func (p headerValidatorsPool) BorrowValidator() *HeaderValidator { + return p.Get().(*HeaderValidator) +} + +func (p headerValidatorsPool) RedeemValidator(s *HeaderValidator) { + p.Put(s) +} + +func (p paramValidatorsPool) BorrowValidator() *ParamValidator { + return p.Get().(*ParamValidator) +} + +func (p paramValidatorsPool) RedeemValidator(s *ParamValidator) { + p.Put(s) +} + +func (p basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator { + return p.Get().(*basicSliceValidator) +} + +func (p basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) { + p.Put(s) +} + +func (p numberValidatorsPool) BorrowValidator() *numberValidator { + return p.Get().(*numberValidator) +} + +func (p numberValidatorsPool) RedeemValidator(s *numberValidator) { + p.Put(s) +} + +func (p stringValidatorsPool) BorrowValidator() *stringValidator { + return p.Get().(*stringValidator) +} + +func (p stringValidatorsPool) RedeemValidator(s *stringValidator) { + p.Put(s) +} + +func (p schemaPropsValidatorsPool) BorrowValidator() 
*schemaPropsValidator { + return p.Get().(*schemaPropsValidator) +} + +func (p schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) { + p.Put(s) +} + +func (p formatValidatorsPool) BorrowValidator() *formatValidator { + return p.Get().(*formatValidator) +} + +func (p formatValidatorsPool) RedeemValidator(s *formatValidator) { + p.Put(s) +} + +func (p typeValidatorsPool) BorrowValidator() *typeValidator { + return p.Get().(*typeValidator) +} + +func (p typeValidatorsPool) RedeemValidator(s *typeValidator) { + p.Put(s) +} + +func (p schemasPool) BorrowSchema() *spec.Schema { + return p.Get().(*spec.Schema) +} + +func (p schemasPool) RedeemSchema(s *spec.Schema) { + p.Put(s) +} + +func (p resultsPool) BorrowResult() *Result { + return p.Get().(*Result).cleared() +} + +func (p resultsPool) RedeemResult(s *Result) { + if s == emptyResult { + return + } + p.Put(s) +} diff --git a/vendor/github.com/go-openapi/validate/pools_debug.go b/vendor/github.com/go-openapi/validate/pools_debug.go new file mode 100644 index 00000000000..d123ed4093f --- /dev/null +++ b/vendor/github.com/go-openapi/validate/pools_debug.go @@ -0,0 +1,1015 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +//go:build validatedebug + +package validate + +import ( + "fmt" + "runtime" + "sync" + "testing" + + "github.com/go-openapi/spec" +) + +// This version of the pools is to be used for debugging and testing, with build tag "validatedebug". +// +// In this mode, the pools are tracked for allocation and redemption of borrowed objects, so we can +// verify a few behaviors of the validators. The debug pools panic when an invalid usage pattern is detected. + +var pools allPools + +func init() { + resetPools() +} + +func resetPools() { + // NOTE: for testing purposes, we might want to reset pools after calling Validate twice. + // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool + // and further calls to Get are mishandled.
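The debug build below tracks every borrowed pointer through a fresh/recycled/redeemed lifecycle and panics on misuse such as double redemption. A condensed, standalone analogue of that bookkeeping (simplified: the real code also records caller locations in allocMap/redeemMap):

package main

import "sync"

type status uint8

const (
	statusFresh status = iota + 1
	statusRecycled
	statusRedeemed
)

// trackedPool condenses the bookkeeping implemented below: each pointer moves
// fresh -> redeemed -> recycled -> redeemed -> ... and any other transition panics.
type trackedPool struct {
	pool  sync.Pool
	state map[*int]status
	mx    sync.Mutex
}

func (p *trackedPool) borrow() *int {
	s := p.pool.Get().(*int)
	p.mx.Lock()
	defer p.mx.Unlock()
	st, seen := p.state[s]
	switch {
	case !seen:
		p.state[s] = statusFresh
	case st == statusRedeemed:
		p.state[s] = statusRecycled
	default:
		panic("recycled object should have been redeemed")
	}
	return s
}

func (p *trackedPool) redeem(s *int) {
	p.mx.Lock()
	defer p.mx.Unlock()
	if p.state[s] == statusRedeemed {
		panic("double redemption")
	}
	p.state[s] = statusRedeemed
	p.pool.Put(s)
}

func main() {
	p := &trackedPool{pool: sync.Pool{New: func() any { return new(int) }}, state: map[*int]status{}}
	x := p.borrow()
	p.redeem(x)
	// p.redeem(x) would panic here
}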
+ + pools = allPools{ + poolOfSchemaValidators: schemaValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &SchemaValidator{} + + return s + }, + }, + debugMap: make(map[*SchemaValidator]status), + allocMap: make(map[*SchemaValidator]string), + redeemMap: make(map[*SchemaValidator]string), + }, + poolOfObjectValidators: objectValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &objectValidator{} + + return s + }, + }, + debugMap: make(map[*objectValidator]status), + allocMap: make(map[*objectValidator]string), + redeemMap: make(map[*objectValidator]string), + }, + poolOfSliceValidators: sliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaSliceValidator{} + + return s + }, + }, + debugMap: make(map[*schemaSliceValidator]status), + allocMap: make(map[*schemaSliceValidator]string), + redeemMap: make(map[*schemaSliceValidator]string), + }, + poolOfItemsValidators: itemsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &itemsValidator{} + + return s + }, + }, + debugMap: make(map[*itemsValidator]status), + allocMap: make(map[*itemsValidator]string), + redeemMap: make(map[*itemsValidator]string), + }, + poolOfBasicCommonValidators: basicCommonValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicCommonValidator{} + + return s + }, + }, + debugMap: make(map[*basicCommonValidator]status), + allocMap: make(map[*basicCommonValidator]string), + redeemMap: make(map[*basicCommonValidator]string), + }, + poolOfHeaderValidators: headerValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &HeaderValidator{} + + return s + }, + }, + debugMap: make(map[*HeaderValidator]status), + allocMap: make(map[*HeaderValidator]string), + redeemMap: make(map[*HeaderValidator]string), + }, + poolOfParamValidators: paramValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &ParamValidator{} + + return s + }, + }, + debugMap: make(map[*ParamValidator]status), + allocMap: make(map[*ParamValidator]string), + redeemMap: make(map[*ParamValidator]string), + }, + poolOfBasicSliceValidators: basicSliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicSliceValidator{} + + return s + }, + }, + debugMap: make(map[*basicSliceValidator]status), + allocMap: make(map[*basicSliceValidator]string), + redeemMap: make(map[*basicSliceValidator]string), + }, + poolOfNumberValidators: numberValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &numberValidator{} + + return s + }, + }, + debugMap: make(map[*numberValidator]status), + allocMap: make(map[*numberValidator]string), + redeemMap: make(map[*numberValidator]string), + }, + poolOfStringValidators: stringValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &stringValidator{} + + return s + }, + }, + debugMap: make(map[*stringValidator]status), + allocMap: make(map[*stringValidator]string), + redeemMap: make(map[*stringValidator]string), + }, + poolOfSchemaPropsValidators: schemaPropsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaPropsValidator{} + + return s + }, + }, + debugMap: make(map[*schemaPropsValidator]status), + allocMap: make(map[*schemaPropsValidator]string), + redeemMap: make(map[*schemaPropsValidator]string), + }, + poolOfFormatValidators: formatValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &formatValidator{} + + return s + }, + }, + debugMap: make(map[*formatValidator]status), + allocMap: make(map[*formatValidator]string), + redeemMap: make(map[*formatValidator]string), + }, + poolOfTypeValidators: 
typeValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &typeValidator{} + + return s + }, + }, + debugMap: make(map[*typeValidator]status), + allocMap: make(map[*typeValidator]string), + redeemMap: make(map[*typeValidator]string), + }, + poolOfSchemas: schemasPool{ + Pool: &sync.Pool{ + New: func() any { + s := &spec.Schema{} + + return s + }, + }, + debugMap: make(map[*spec.Schema]status), + allocMap: make(map[*spec.Schema]string), + redeemMap: make(map[*spec.Schema]string), + }, + poolOfResults: resultsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &Result{} + + return s + }, + }, + debugMap: make(map[*Result]status), + allocMap: make(map[*Result]string), + redeemMap: make(map[*Result]string), + }, + } +} + +const ( + statusFresh status = iota + 1 + statusRecycled + statusRedeemed +) + +func (s status) String() string { + switch s { + case statusFresh: + return "fresh" + case statusRecycled: + return "recycled" + case statusRedeemed: + return "redeemed" + default: + panic(fmt.Errorf("invalid status: %d", s)) + } +} + +type ( + // Debug + status uint8 + + allPools struct { + // memory pools for all validator objects. + // + // Each pool can be borrowed from and redeemed to. + poolOfSchemaValidators schemaValidatorsPool + poolOfObjectValidators objectValidatorsPool + poolOfSliceValidators sliceValidatorsPool + poolOfItemsValidators itemsValidatorsPool + poolOfBasicCommonValidators basicCommonValidatorsPool + poolOfHeaderValidators headerValidatorsPool + poolOfParamValidators paramValidatorsPool + poolOfBasicSliceValidators basicSliceValidatorsPool + poolOfNumberValidators numberValidatorsPool + poolOfStringValidators stringValidatorsPool + poolOfSchemaPropsValidators schemaPropsValidatorsPool + poolOfFormatValidators formatValidatorsPool + poolOfTypeValidators typeValidatorsPool + poolOfSchemas schemasPool + poolOfResults resultsPool + } + + schemaValidatorsPool struct { + *sync.Pool + debugMap map[*SchemaValidator]status + allocMap map[*SchemaValidator]string + redeemMap map[*SchemaValidator]string + mx sync.Mutex + } + + objectValidatorsPool struct { + *sync.Pool + debugMap map[*objectValidator]status + allocMap map[*objectValidator]string + redeemMap map[*objectValidator]string + mx sync.Mutex + } + + sliceValidatorsPool struct { + *sync.Pool + debugMap map[*schemaSliceValidator]status + allocMap map[*schemaSliceValidator]string + redeemMap map[*schemaSliceValidator]string + mx sync.Mutex + } + + itemsValidatorsPool struct { + *sync.Pool + debugMap map[*itemsValidator]status + allocMap map[*itemsValidator]string + redeemMap map[*itemsValidator]string + mx sync.Mutex + } + + basicCommonValidatorsPool struct { + *sync.Pool + debugMap map[*basicCommonValidator]status + allocMap map[*basicCommonValidator]string + redeemMap map[*basicCommonValidator]string + mx sync.Mutex + } + + headerValidatorsPool struct { + *sync.Pool + debugMap map[*HeaderValidator]status + allocMap map[*HeaderValidator]string + redeemMap map[*HeaderValidator]string + mx sync.Mutex + } + + paramValidatorsPool struct { + *sync.Pool + debugMap map[*ParamValidator]status + allocMap map[*ParamValidator]string + redeemMap map[*ParamValidator]string + mx sync.Mutex + } + + basicSliceValidatorsPool struct { + *sync.Pool + debugMap map[*basicSliceValidator]status + allocMap map[*basicSliceValidator]string + redeemMap map[*basicSliceValidator]string + mx sync.Mutex + } + + numberValidatorsPool struct { + *sync.Pool + debugMap map[*numberValidator]status + allocMap map[*numberValidator]string + redeemMap 
map[*numberValidator]string + mx sync.Mutex + } + + stringValidatorsPool struct { + *sync.Pool + debugMap map[*stringValidator]status + allocMap map[*stringValidator]string + redeemMap map[*stringValidator]string + mx sync.Mutex + } + + schemaPropsValidatorsPool struct { + *sync.Pool + debugMap map[*schemaPropsValidator]status + allocMap map[*schemaPropsValidator]string + redeemMap map[*schemaPropsValidator]string + mx sync.Mutex + } + + formatValidatorsPool struct { + *sync.Pool + debugMap map[*formatValidator]status + allocMap map[*formatValidator]string + redeemMap map[*formatValidator]string + mx sync.Mutex + } + + typeValidatorsPool struct { + *sync.Pool + debugMap map[*typeValidator]status + allocMap map[*typeValidator]string + redeemMap map[*typeValidator]string + mx sync.Mutex + } + + schemasPool struct { + *sync.Pool + debugMap map[*spec.Schema]status + allocMap map[*spec.Schema]string + redeemMap map[*spec.Schema]string + mx sync.Mutex + } + + resultsPool struct { + *sync.Pool + debugMap map[*Result]status + allocMap map[*Result]string + redeemMap map[*Result]string + mx sync.Mutex + } +) + +func (p *schemaValidatorsPool) BorrowValidator() *SchemaValidator { + s := p.Get().(*SchemaValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled schema should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemaValidatorsPool) RedeemValidator(s *SchemaValidator) { + // NOTE: s might be nil. In that case, Put is a noop. + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schema should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schema should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *objectValidatorsPool) BorrowValidator() *objectValidator { + s := p.Get().(*objectValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled object should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *objectValidatorsPool) RedeemValidator(s *objectValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed object should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed object should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *sliceValidatorsPool) BorrowValidator() *schemaSliceValidator { + s := p.Get().(*schemaSliceValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled schemaSliceValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schemaSliceValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schemaSliceValidator should have been allocated from a fresh or recycled pointer") + } + 
p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *itemsValidatorsPool) BorrowValidator() *itemsValidator { + s := p.Get().(*itemsValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled itemsValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *itemsValidatorsPool) RedeemValidator(s *itemsValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed itemsValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed itemsValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator { + s := p.Get().(*basicCommonValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled basicCommonValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed basicCommonValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed basicCommonValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *headerValidatorsPool) BorrowValidator() *HeaderValidator { + s := p.Get().(*HeaderValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled HeaderValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *headerValidatorsPool) RedeemValidator(s *HeaderValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed header should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed header should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *paramValidatorsPool) BorrowValidator() *ParamValidator { + s := p.Get().(*ParamValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled param should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *paramValidatorsPool) RedeemValidator(s *ParamValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed param should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed param should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator { + s := p.Get().(*basicSliceValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + 
p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled basicSliceValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed basicSliceValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed basicSliceValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *numberValidatorsPool) BorrowValidator() *numberValidator { + s := p.Get().(*numberValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled number should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *numberValidatorsPool) RedeemValidator(s *numberValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed number should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed number should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *stringValidatorsPool) BorrowValidator() *stringValidator { + s := p.Get().(*stringValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled string should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *stringValidatorsPool) RedeemValidator(s *stringValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed string should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed string should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator { + s := p.Get().(*schemaPropsValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled param should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schemaProps should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schemaProps should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *formatValidatorsPool) BorrowValidator() *formatValidator { + s := p.Get().(*formatValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled format should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *formatValidatorsPool) RedeemValidator(s *formatValidator) { + 
p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed format should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed format should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *typeValidatorsPool) BorrowValidator() *typeValidator { + s := p.Get().(*typeValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled type should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *typeValidatorsPool) RedeemValidator(s *typeValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed type should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic(fmt.Errorf("redeemed type should have been allocated from a fresh or recycled pointer. Got status %s, already redeamed at: %s", x, p.redeemMap[s])) + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *schemasPool) BorrowSchema() *spec.Schema { + s := p.Get().(*spec.Schema) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled spec.Schema should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemasPool) RedeemSchema(s *spec.Schema) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed spec.Schema should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed spec.Schema should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *resultsPool) BorrowResult() *Result { + s := p.Get().(*Result).cleared() + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled result should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *resultsPool) RedeemResult(s *Result) { + if s == emptyResult { + if len(s.Errors) > 0 || len(s.Warnings) > 0 { + panic("empty result should not mutate") + } + return + } + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed Result should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed Result should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *allPools) allIsRedeemed(t testing.TB) bool { + outcome := true + for k, v := range p.poolOfSchemaValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemaValidator should be redeemed. Allocated by: %s", p.poolOfSchemaValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfObjectValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("objectValidator should be redeemed. Allocated by: %s", p.poolOfObjectValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSliceValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("sliceValidator should be redeemed. 
Allocated by: %s", p.poolOfSliceValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfItemsValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("itemsValidator should be redeemed. Allocated by: %s", p.poolOfItemsValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfBasicCommonValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("basicCommonValidator should be redeemed. Allocated by: %s", p.poolOfBasicCommonValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfHeaderValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("headerValidator should be redeemed. Allocated by: %s", p.poolOfHeaderValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfParamValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("paramValidator should be redeemed. Allocated by: %s", p.poolOfParamValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfBasicSliceValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("basicSliceValidator should be redeemed. Allocated by: %s", p.poolOfBasicSliceValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfNumberValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("numberValidator should be redeemed. Allocated by: %s", p.poolOfNumberValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfStringValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("stringValidator should be redeemed. Allocated by: %s", p.poolOfStringValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSchemaPropsValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemaPropsValidator should be redeemed. Allocated by: %s", p.poolOfSchemaPropsValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfFormatValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("formatValidator should be redeemed. Allocated by: %s", p.poolOfFormatValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfTypeValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("typeValidator should be redeemed. Allocated by: %s", p.poolOfTypeValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSchemas.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemas should be redeemed. Allocated by: %s", p.poolOfSchemas.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfResults.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("result should be redeemed. 
Allocated by: %s", p.poolOfResults.allocMap[k]) + outcome = false + } + + return outcome +} + +func caller() string { + pc, _, _, _ := runtime.Caller(3) //nolint:dogsled + from, line := runtime.FuncForPC(pc).FileLine(pc) + + return fmt.Sprintf("%s:%d", from, line) +} diff --git a/vendor/github.com/go-openapi/validate/result.go b/vendor/github.com/go-openapi/validate/result.go new file mode 100644 index 00000000000..69219e99823 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/result.go @@ -0,0 +1,560 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + stderrors "errors" + "reflect" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" +) + +var emptyResult = &Result{MatchCount: 1} + +// Result represents a validation result set, composed of +// errors and warnings. +// +// It is used to keep track of all detected errors and warnings during +// the validation of a specification. +// +// Matchcount is used to determine +// which errors are relevant in the case of AnyOf, OneOf +// schema validation. Results from the validation branch +// with most matches get eventually selected. +// +// TODO: keep path of key originating the error +type Result struct { + Errors []error + Warnings []error + MatchCount int + + // the object data + data any + + // Schemata for the root object + rootObjectSchemata schemata + // Schemata for object fields + fieldSchemata []fieldSchemata + // Schemata for slice items + itemSchemata []itemSchemata + + cachedFieldSchemata map[FieldKey][]*spec.Schema + cachedItemSchemata map[ItemKey][]*spec.Schema + + wantsRedeemOnMerge bool +} + +// FieldKey is a pair of an object and a field, usable as a key for a map. +type FieldKey struct { + object reflect.Value // actually a map[string]any, but the latter cannot be a key + field string +} + +// ItemKey is a pair of a slice and an index, usable as a key for a map. +type ItemKey struct { + slice reflect.Value // actually a []any, but the latter cannot be a key + index int +} + +// NewFieldKey returns a pair of an object and field usable as a key of a map. +func NewFieldKey(obj map[string]any, field string) FieldKey { + return FieldKey{object: reflect.ValueOf(obj), field: field} +} + +// Object returns the underlying object of this key. +func (fk *FieldKey) Object() map[string]any { + return fk.object.Interface().(map[string]any) +} + +// Field returns the underlying field of this key. +func (fk *FieldKey) Field() string { + return fk.field +} + +// NewItemKey returns a pair of a slice and index usable as a key of a map. +func NewItemKey(slice any, i int) ItemKey { + return ItemKey{slice: reflect.ValueOf(slice), index: i} +} + +// Slice returns the underlying slice of this key. +func (ik *ItemKey) Slice() []any { + return ik.slice.Interface().([]any) +} + +// Index returns the underlying index of this key. +func (ik *ItemKey) Index() int { + return ik.index +} + +type fieldSchemata struct { + obj map[string]any + field string + schemata schemata +} + +type itemSchemata struct { + slice reflect.Value + index int + schemata schemata +} + +// Merge merges this result with the other one(s), preserving match counts etc. 
+func (r *Result) Merge(others ...*Result) *Result { + for _, other := range others { + if other == nil { + continue + } + r.mergeWithoutRootSchemata(other) + r.rootObjectSchemata.Append(other.rootObjectSchemata) + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } + } + return r +} + +// Data returns the original data object used for validation. Mutating this renders +// the result invalid. +func (r *Result) Data() any { + return r.data +} + +// RootObjectSchemata returns the schemata which apply to the root object. +func (r *Result) RootObjectSchemata() []*spec.Schema { + return r.rootObjectSchemata.Slice() +} + +// FieldSchemata returns the schemata which apply to fields in objects. +func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema { + if r.cachedFieldSchemata != nil { + return r.cachedFieldSchemata + } + + ret := make(map[FieldKey][]*spec.Schema, len(r.fieldSchemata)) + for _, fs := range r.fieldSchemata { + key := NewFieldKey(fs.obj, fs.field) + if fs.schemata.one != nil { + ret[key] = append(ret[key], fs.schemata.one) + } else if len(fs.schemata.multiple) > 0 { + ret[key] = append(ret[key], fs.schemata.multiple...) + } + } + r.cachedFieldSchemata = ret + + return ret +} + +// ItemSchemata returns the schemata which apply to items in slices. +func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema { + if r.cachedItemSchemata != nil { + return r.cachedItemSchemata + } + + ret := make(map[ItemKey][]*spec.Schema, len(r.itemSchemata)) + for _, ss := range r.itemSchemata { + key := NewItemKey(ss.slice, ss.index) + if ss.schemata.one != nil { + ret[key] = append(ret[key], ss.schemata.one) + } else if len(ss.schemata.multiple) > 0 { + ret[key] = append(ret[key], ss.schemata.multiple...) + } + } + r.cachedItemSchemata = ret + return ret +} + +// MergeAsErrors merges this result with the other one(s), preserving match counts etc. +// +// Warnings from input are merged as Errors in the returned merged Result. +func (r *Result) MergeAsErrors(others ...*Result) *Result { + for _, other := range others { + if other != nil { + r.resetCaches() + r.AddErrors(other.Errors...) + r.AddErrors(other.Warnings...) + r.MatchCount += other.MatchCount + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } + } + } + return r +} + +// MergeAsWarnings merges this result with the other one(s), preserving match counts etc. +// +// Errors from input are merged as Warnings in the returned merged Result. +func (r *Result) MergeAsWarnings(others ...*Result) *Result { + for _, other := range others { + if other != nil { + r.resetCaches() + r.AddWarnings(other.Errors...) + r.AddWarnings(other.Warnings...) + r.MatchCount += other.MatchCount + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } + } + } + return r +} + +// AddErrors adds errors to this validation result (if not already reported). +// +// Since the same check may be passed several times while exploring the +// spec structure (via $ref, ...) reported messages are kept +// unique. +func (r *Result) AddErrors(errors ...error) { + for _, e := range errors { + found := false + if e != nil { + for _, isReported := range r.Errors { + if e.Error() == isReported.Error() { + found = true + break + } + } + if !found { + r.Errors = append(r.Errors, e) + } + } + } +} + +// AddWarnings adds warnings to this validation result (if not already reported). 
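Both AddErrors and AddWarnings deduplicate by comparing rendered messages, since $ref expansion can visit the same constraint several times. The core of that check, as a standalone sketch:

package main

import (
	"errors"
	"fmt"
)

// addUnique appends e unless an error with the same message is already present,
// mirroring the dedup in AddErrors/AddWarnings.
func addUnique(list []error, e error) []error {
	for _, seen := range list {
		if seen.Error() == e.Error() {
			return list
		}
	}
	return append(list, e)
}

func main() {
	var errs []error
	errs = addUnique(errs, errors.New("a in body is required"))
	errs = addUnique(errs, errors.New("a in body is required")) // duplicate, dropped
	fmt.Println(len(errs)) // 1
}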
+func (r *Result) AddWarnings(warnings ...error) { + for _, e := range warnings { + found := false + if e != nil { + for _, isReported := range r.Warnings { + if e.Error() == isReported.Error() { + found = true + break + } + } + if !found { + r.Warnings = append(r.Warnings, e) + } + } + } +} + +// IsValid returns true when this result is valid. +// +// Returns true on a nil *Result. +func (r *Result) IsValid() bool { + if r == nil { + return true + } + return len(r.Errors) == 0 +} + +// HasErrors returns true when this result is invalid. +// +// Returns false on a nil *Result. +func (r *Result) HasErrors() bool { + if r == nil { + return false + } + return !r.IsValid() +} + +// HasWarnings returns true when this result contains warnings. +// +// Returns false on a nil *Result. +func (r *Result) HasWarnings() bool { + if r == nil { + return false + } + return len(r.Warnings) > 0 +} + +// HasErrorsOrWarnings returns true when this result contains +// either errors or warnings. +// +// Returns false on a nil *Result. +func (r *Result) HasErrorsOrWarnings() bool { + if r == nil { + return false + } + return len(r.Errors) > 0 || len(r.Warnings) > 0 +} + +// Inc increments the match count +func (r *Result) Inc() { + r.MatchCount++ +} + +// AsError renders this result as an error interface +// +// TODO: reporting / pretty print with path ordered and indented +func (r *Result) AsError() error { + if r.IsValid() { + return nil + } + return errors.CompositeValidationError(r.Errors...) +} + +func (r *Result) resetCaches() { + r.cachedFieldSchemata = nil + r.cachedItemSchemata = nil +} + +// mergeForField merges other into r, assigning other's root schemata to the given Object and field name. +// +//nolint:unparam +func (r *Result) mergeForField(obj map[string]any, field string, other *Result) *Result { + if other == nil { + return r + } + r.mergeWithoutRootSchemata(other) + + if other.rootObjectSchemata.Len() > 0 { + if r.fieldSchemata == nil { + r.fieldSchemata = make([]fieldSchemata, len(obj)) + } + // clone other schemata, as other is about to be redeemed to the pool + r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{ + obj: obj, + field: field, + schemata: other.rootObjectSchemata.Clone(), + }) + } + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } + + return r +} + +// mergeForSlice merges other into r, assigning other's root schemata to the given slice and index. +// +//nolint:unparam +func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Result { + if other == nil { + return r + } + r.mergeWithoutRootSchemata(other) + + if other.rootObjectSchemata.Len() > 0 { + if r.itemSchemata == nil { + r.itemSchemata = make([]itemSchemata, slice.Len()) + } + // clone other schemata, as other is about to be redeemed to the pool + r.itemSchemata = append(r.itemSchemata, itemSchemata{ + slice: slice, + index: i, + schemata: other.rootObjectSchemata.Clone(), + }) + } + + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } + + return r +} + +// addRootObjectSchemata adds the given schemata for the root object of the result. +// +// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result. +func (r *Result) addRootObjectSchemata(s *spec.Schema) { + clone := *s + r.rootObjectSchemata.Append(schemata{one: &clone}) +} + +// addPropertySchemata adds the given schemata for the object and field. +// +// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result. 
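The shallow clone mentioned here (clone := *schema) decouples the stored schema from later reuse of the original pointer, but nested slices and maps remain shared. A sketch of that trade-off, with a simplified stand-in type:

package main

import "fmt"

type schema struct {
	Title    string
	Required []string
}

func main() {
	s := &schema{Title: "pet", Required: []string{"name"}}
	clone := *s // shallow copy: new struct value, shared backing slice

	s.Title = "mutated"  // not visible through clone
	s.Required[0] = "id" // visible through clone.Required as well
	fmt.Println(clone.Title, clone.Required[0]) // pet id
}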
+func (r *Result) addPropertySchemata(obj map[string]any, fld string, schema *spec.Schema) { + if r.fieldSchemata == nil { + r.fieldSchemata = make([]fieldSchemata, 0, len(obj)) + } + clone := *schema + r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: &clone}}) +} + +/* +// addSliceSchemata adds the given schemata for the slice and index. +// The slice schemata might be reused. I.e. do not modify it after being added to a result. +func (r *Result) addSliceSchemata(slice reflect.Value, i int, schema *spec.Schema) { + if r.itemSchemata == nil { + r.itemSchemata = make([]itemSchemata, 0, slice.Len()) + } + r.itemSchemata = append(r.itemSchemata, itemSchemata{slice: slice, index: i, schemata: schemata{one: schema}}) +} +*/ + +// mergeWithoutRootSchemata merges other into r, ignoring the rootObject schemata. +func (r *Result) mergeWithoutRootSchemata(other *Result) { + r.resetCaches() + r.AddErrors(other.Errors...) + r.AddWarnings(other.Warnings...) + r.MatchCount += other.MatchCount + + if other.fieldSchemata != nil { + if r.fieldSchemata == nil { + r.fieldSchemata = make([]fieldSchemata, 0, len(other.fieldSchemata)) + } + for _, field := range other.fieldSchemata { + field.schemata = field.schemata.Clone() + r.fieldSchemata = append(r.fieldSchemata, field) + } + } + + if other.itemSchemata != nil { + if r.itemSchemata == nil { + r.itemSchemata = make([]itemSchemata, 0, len(other.itemSchemata)) + } + for _, field := range other.itemSchemata { + field.schemata = field.schemata.Clone() + r.itemSchemata = append(r.itemSchemata, field) + } + } +} + +func isImportant(err error) bool { + return strings.HasPrefix(err.Error(), "IMPORTANT!") +} + +func stripImportantTag(err error) error { + return stderrors.New(strings.TrimPrefix(err.Error(), "IMPORTANT!")) //nolint:err113 +} + +func (r *Result) keepRelevantErrors() *Result { + // TODO: this one is going to disappear... + // keepRelevantErrors strips a result from standard errors and keeps + // the ones which are supposedly more accurate. + // + // The original result remains unaffected (creates a new instance of Result). + // This method is used to work around the "matchCount" filter which would otherwise + // strip our result from some accurate error reporting from lower level validators. + // + // NOTE: this implementation with a placeholder (IMPORTANT!) is neither clean nor + // very efficient. On the other hand, relying on go-openapi/errors to manipulate + // codes would require changing a lot here. So, for the moment, let's go with + // placeholders. + strippedErrors := []error{} + for _, e := range r.Errors { + if isImportant(e) { + strippedErrors = append(strippedErrors, stripImportantTag(e)) + } + } + strippedWarnings := []error{} + for _, e := range r.Warnings { + if isImportant(e) { + strippedWarnings = append(strippedWarnings, stripImportantTag(e)) + } + } + var strippedResult *Result + if r.wantsRedeemOnMerge { + strippedResult = pools.poolOfResults.BorrowResult() + } else { + strippedResult = new(Result) + } + strippedResult.Errors = strippedErrors + strippedResult.Warnings = strippedWarnings + return strippedResult +} + +func (r *Result) cleared() *Result { + // clear the Result to be reusable. Keep allocated capacity.
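Truncating with s = s[:0], as done next, keeps the backing array so a recycled Result grows again without reallocating; for example:

package main

import "fmt"

func main() {
	buf := make([]error, 3, 8)
	buf = buf[:0] // length reset, capacity (and allocation) retained
	fmt.Println(len(buf), cap(buf)) // 0 8
}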
+ r.Errors = r.Errors[:0] + r.Warnings = r.Warnings[:0] + r.MatchCount = 0 + r.data = nil + r.rootObjectSchemata.one = nil + r.rootObjectSchemata.multiple = r.rootObjectSchemata.multiple[:0] + r.fieldSchemata = r.fieldSchemata[:0] + r.itemSchemata = r.itemSchemata[:0] + for k := range r.cachedFieldSchemata { + delete(r.cachedFieldSchemata, k) + } + for k := range r.cachedItemSchemata { + delete(r.cachedItemSchemata, k) + } + r.wantsRedeemOnMerge = true // mark this result as eligible for redeem when merged into another + + return r +} + +// schemata is an arbitrary number of schemata. It makes a distinction between zero, +// one and many schemata to avoid slice allocations. +type schemata struct { + // one is set if there is exactly one schema. In that case multiple must be nil. + one *spec.Schema + // multiple is an arbitrary number of schemas. If it is set, one must be nil. + multiple []*spec.Schema +} + +func (s *schemata) Len() int { + if s.one != nil { + return 1 + } + return len(s.multiple) +} + +func (s *schemata) Slice() []*spec.Schema { + if s == nil { + return nil + } + if s.one != nil { + return []*spec.Schema{s.one} + } + return s.multiple +} + +// Append appends the schemata in other to s. It mutates s in-place. +func (s *schemata) Append(other schemata) { + if other.one == nil && len(other.multiple) == 0 { + return + } + if s.one == nil && len(s.multiple) == 0 { + *s = other + return + } + + if s.one != nil { + if other.one != nil { + s.multiple = []*spec.Schema{s.one, other.one} + } else { + t := make([]*spec.Schema, 0, 1+len(other.multiple)) + s.multiple = append(append(t, s.one), other.multiple...) + } + s.one = nil + } else { + if other.one != nil { + s.multiple = append(s.multiple, other.one) + } else { + if cap(s.multiple) >= len(s.multiple)+len(other.multiple) { + s.multiple = append(s.multiple, other.multiple...) + } else { + t := make([]*spec.Schema, 0, len(s.multiple)+len(other.multiple)) + s.multiple = append(append(t, s.multiple...), other.multiple...)
+ } + } + } +} + +func (s schemata) Clone() schemata { + var clone schemata + + if s.one != nil { + clone.one = new(spec.Schema) + *clone.one = *s.one + } + + if len(s.multiple) > 0 { + clone.multiple = make([]*spec.Schema, len(s.multiple)) + for idx := range len(s.multiple) { + sp := new(spec.Schema) + *sp = *s.multiple[idx] + clone.multiple[idx] = sp + } + } + + return clone +} diff --git a/vendor/github.com/go-openapi/validate/rexp.go b/vendor/github.com/go-openapi/validate/rexp.go new file mode 100644 index 00000000000..795f148d0cf --- /dev/null +++ b/vendor/github.com/go-openapi/validate/rexp.go @@ -0,0 +1,59 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "maps" + re "regexp" + "sync" + "sync/atomic" +) + +// Cache for compiled regular expressions +var ( + cacheMutex = &sync.Mutex{} + reDict = atomic.Value{} // map[string]*re.Regexp +) + +func compileRegexp(pattern string) (*re.Regexp, error) { + if cache, ok := reDict.Load().(map[string]*re.Regexp); ok { + if r := cache[pattern]; r != nil { + return r, nil + } + } + + r, err := re.Compile(pattern) + if err != nil { + return nil, err + } + cacheRegexp(r) + return r, nil +} + +func mustCompileRegexp(pattern string) *re.Regexp { + if cache, ok := reDict.Load().(map[string]*re.Regexp); ok { + if r := cache[pattern]; r != nil { + return r + } + } + + r := re.MustCompile(pattern) + cacheRegexp(r) + return r +} + +func cacheRegexp(r *re.Regexp) { + cacheMutex.Lock() + defer cacheMutex.Unlock() + + if cache, ok := reDict.Load().(map[string]*re.Regexp); !ok || cache[r.String()] == nil { + newCache := map[string]*re.Regexp{ + r.String(): r, + } + + maps.Copy(newCache, cache) + + reDict.Store(newCache) + } +} diff --git a/vendor/github.com/go-openapi/validate/schema.go b/vendor/github.com/go-openapi/validate/schema.go new file mode 100644 index 00000000000..375a98765d7 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/schema.go @@ -0,0 +1,351 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "encoding/json" + "reflect" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/jsonutils" +) + +// SchemaValidator validates data against a JSON schema +type SchemaValidator struct { + Path string + in string + Schema *spec.Schema + validators [8]valueValidator + Root any + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +// AgainstSchema validates the specified data against the provided schema, using a registry of supported formats. +// +// When no pre-parsed *spec.Schema structure is provided, it uses a JSON schema as default. See example. +func AgainstSchema(schema *spec.Schema, data any, formats strfmt.Registry, options ...Option) error { + res := NewSchemaValidator(schema, nil, "", formats, + append(options, WithRecycleValidators(true), withRecycleResults(true))..., + ).Validate(data) + defer func() { + pools.poolOfResults.RedeemResult(res) + }() + + if res.HasErrors() { + return errors.CompositeValidationError(res.Errors...) + } + + return nil +} + +// NewSchemaValidator creates a new schema validator. +// +// Panics if the provided schema is invalid. 
+func NewSchemaValidator(schema *spec.Schema, rootSchema any, root string, formats strfmt.Registry, options ...Option) *SchemaValidator { + opts := new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newSchemaValidator(schema, rootSchema, root, formats, opts) +} + +func newSchemaValidator(schema *spec.Schema, rootSchema any, root string, formats strfmt.Registry, opts *SchemaValidatorOptions) *SchemaValidator { + if schema == nil { + return nil + } + + if rootSchema == nil { + rootSchema = schema + } + + if schema.ID != "" || schema.Ref.String() != "" || schema.Ref.IsRoot() { + err := spec.ExpandSchema(schema, rootSchema, nil) + if err != nil { + msg := invalidSchemaProvidedMsg(err).Error() + panic(msg) + } + } + + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var s *SchemaValidator + if opts.recycleValidators { + s = pools.poolOfSchemaValidators.BorrowValidator() + } else { + s = new(SchemaValidator) + } + + s.Path = root + s.in = "body" + s.Schema = schema + s.Root = rootSchema + s.Options = opts + s.KnownFormats = formats + + s.validators = [8]valueValidator{ + s.typeValidator(), + s.schemaPropsValidator(), + s.stringValidator(), + s.formatValidator(), + s.numberValidator(), + s.sliceValidator(), + s.commonValidator(), + s.objectValidator(), + } + + return s +} + +// SetPath sets the path for this schema validator +func (s *SchemaValidator) SetPath(path string) { + s.Path = path +} + +// Applies returns true when this schema validator applies +func (s *SchemaValidator) Applies(source any, _ reflect.Kind) bool { + _, ok := source.(*spec.Schema) + return ok +} + +// Validate validates the data against the schema +func (s *SchemaValidator) Validate(data any) *Result { + if s == nil { + return emptyResult + } + + if s.Options.recycleValidators { + defer func() { + s.redeemChildren() + s.redeem() // one-time use validator + }() + } + + var result *Result + if s.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + result.data = data + } else { + result = &Result{data: data} + } + + if s.Schema != nil && !s.Options.skipSchemataResult { + result.addRootObjectSchemata(s.Schema) + } + + if data == nil { + // early exit with minimal validation + result.Merge(s.validators[0].Validate(data)) // type validator + result.Merge(s.validators[6].Validate(data)) // common validator + + if s.Options.recycleValidators { + s.validators[0] = nil + s.validators[6] = nil + } + + return result + } + + tpe := reflect.TypeOf(data) + kind := tpe.Kind() + for kind == reflect.Ptr { + tpe = tpe.Elem() + kind = tpe.Kind() + } + d := data + + if kind == reflect.Struct { + // NOTE: since reflect retrieves the true nature of types + // this means that all strfmt types passed here (e.g. strfmt.Datetime, etc..) + // are converted here to strings, and structs are systematically converted + // to map[string]interface{}.
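For plain structs, the effect of the dynamic-JSON conversion performed next is equivalent to a marshal/unmarshal round-trip; this sketch assumes jsonutils.FromDynamicJSON behaves like encoding/json here:

package main

import (
	"encoding/json"
	"fmt"
)

type pet struct {
	Name string `json:"name"`
}

func main() {
	// round-trip a struct through JSON to obtain the generic form
	b, _ := json.Marshal(pet{Name: "rex"})
	var dyn any
	_ = json.Unmarshal(b, &dyn)
	fmt.Printf("%T\n", dyn) // map[string]interface {}: what the validators see
}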
+ var dd any + if err := jsonutils.FromDynamicJSON(data, &dd); err != nil { + result.AddErrors(err) + result.Inc() + + return result + } + + d = dd + } + + // TODO: this part should be handed over to type validator + // Handle special case of json.Number data (number marshalled as string) + isnumber := s.Schema != nil && (s.Schema.Type.Contains(numberType) || s.Schema.Type.Contains(integerType)) + if num, ok := data.(json.Number); ok && isnumber { + if s.Schema.Type.Contains(integerType) { // avoid lossy conversion + in, erri := num.Int64() + if erri != nil { + result.AddErrors(invalidTypeConversionMsg(s.Path, erri)) + result.Inc() + + return result + } + d = in + } else { + nf, errf := num.Float64() + if errf != nil { + result.AddErrors(invalidTypeConversionMsg(s.Path, errf)) + result.Inc() + + return result + } + d = nf + } + + tpe = reflect.TypeOf(d) + kind = tpe.Kind() + } + + for idx, v := range s.validators { + if !v.Applies(s.Schema, kind) { + if s.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := v.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := v.(interface{ redeem() }); ok { + redeemable.redeem() + } + s.validators[idx] = nil // prevents further (unsafe) usage + } + + continue + } + + result.Merge(v.Validate(d)) + if s.Options.recycleValidators { + s.validators[idx] = nil // prevents further (unsafe) usage + } + result.Inc() + } + result.Inc() + + return result +} + +func (s *SchemaValidator) typeValidator() valueValidator { + return newTypeValidator( + s.Path, + s.in, + s.Schema.Type, + s.Schema.Nullable, + s.Schema.Format, + s.Options, + ) +} + +func (s *SchemaValidator) commonValidator() valueValidator { + return newBasicCommonValidator( + s.Path, + s.in, + s.Schema.Default, + s.Schema.Enum, + s.Options, + ) +} + +func (s *SchemaValidator) sliceValidator() valueValidator { + return newSliceValidator( + s.Path, + s.in, + s.Schema.MaxItems, + s.Schema.MinItems, + s.Schema.UniqueItems, + s.Schema.AdditionalItems, + s.Schema.Items, + s.Root, + s.KnownFormats, + s.Options, + ) +} + +func (s *SchemaValidator) numberValidator() valueValidator { + return newNumberValidator( + s.Path, + s.in, + s.Schema.Default, + s.Schema.MultipleOf, + s.Schema.Maximum, + s.Schema.ExclusiveMaximum, + s.Schema.Minimum, + s.Schema.ExclusiveMinimum, + "", + "", + s.Options, + ) +} + +func (s *SchemaValidator) stringValidator() valueValidator { + return newStringValidator( + s.Path, + s.in, + nil, + false, + false, + s.Schema.MaxLength, + s.Schema.MinLength, + s.Schema.Pattern, + s.Options, + ) +} + +func (s *SchemaValidator) formatValidator() valueValidator { + return newFormatValidator( + s.Path, + s.in, + s.Schema.Format, + s.KnownFormats, + s.Options, + ) +} + +func (s *SchemaValidator) schemaPropsValidator() valueValidator { + sch := s.Schema + return newSchemaPropsValidator( + s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats, + s.Options, + ) +} + +func (s *SchemaValidator) objectValidator() valueValidator { + return newObjectValidator( + s.Path, + s.in, + s.Schema.MaxProperties, + s.Schema.MinProperties, + s.Schema.Required, + s.Schema.Properties, + s.Schema.AdditionalProperties, + s.Schema.PatternProperties, + s.Root, + s.KnownFormats, + s.Options, + ) +} + +func (s *SchemaValidator) redeem() { + pools.poolOfSchemaValidators.RedeemValidator(s) +} + +func (s *SchemaValidator) redeemChildren() { + for i, validator := range 
s.validators {
+		if validator == nil {
+			continue
+		}
+		if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+			redeemableChildren.redeemChildren()
+		}
+		if redeemable, ok := validator.(interface{ redeem() }); ok {
+			redeemable.redeem()
+		}
+		s.validators[i] = nil // free up allocated children if not in pool
+	}
+}
diff --git a/vendor/github.com/go-openapi/validate/schema_messages.go b/vendor/github.com/go-openapi/validate/schema_messages.go
new file mode 100644
index 00000000000..e8c7c48ad7f
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/schema_messages.go
@@ -0,0 +1,67 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package validate
+
+import (
+	"github.com/go-openapi/errors"
+)
+
+// Error messages related to schema validation and returned as results.
+const (
+	// ArrayDoesNotAllowAdditionalItemsError when an additionalItems construct is not verified by the array values provided.
+	//
+	// TODO: should move to package go-openapi/errors
+	ArrayDoesNotAllowAdditionalItemsError = "array doesn't allow for additional items"
+
+	// HasDependencyError indicates that a dependencies construct was not verified
+	HasDependencyError = "%q has a dependency on %s"
+
+	// InvalidSchemaProvidedError indicates that the schema provided to validate a value cannot be properly compiled
+	InvalidSchemaProvidedError = "Invalid schema provided to SchemaValidator: %v"
+
+	// InvalidTypeConversionError indicates that a numerical conversion for the given type could not be carried out
+	InvalidTypeConversionError = "invalid type conversion in %s: %v "
+
+	// MustValidateAtLeastOneSchemaError indicates that in an AnyOf construct, none of the specified schema constraints were verified
+	MustValidateAtLeastOneSchemaError = "%q must validate at least one schema (anyOf)"
+
+	// MustValidateOnlyOneSchemaError indicates that in a OneOf construct, either none of the specified schema constraints were verified, or several were
+	MustValidateOnlyOneSchemaError = "%q must validate one and only one schema (oneOf). %s"
+
+	// MustValidateAllSchemasError indicates that in an AllOf construct, at least one of the specified schema constraints was not verified
+	//
+	// TODO: punctuation in message
+	MustValidateAllSchemasError = "%q must validate all the schemas (allOf)%s"
+
+	// MustNotValidateSchemaError indicates that in a Not construct, the schema constraint specified was verified
+	MustNotValidateSchemaError = "%q must not validate the schema (not)"
+)
+
+// Warning messages related to schema validation and returned as results
+const ()
+
+func invalidSchemaProvidedMsg(err error) errors.Error {
+	return errors.New(InternalErrorCode, InvalidSchemaProvidedError, err)
+}
+func invalidTypeConversionMsg(path string, err error) errors.Error {
+	return errors.New(errors.CompositeErrorCode, InvalidTypeConversionError, path, err)
+}
+func mustValidateOnlyOneSchemaMsg(path, additionalMsg string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, MustValidateOnlyOneSchemaError, path, additionalMsg)
+}
+func mustValidateAtLeastOneSchemaMsg(path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, MustValidateAtLeastOneSchemaError, path)
+}
+func mustValidateAllSchemasMsg(path, additionalMsg string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, MustValidateAllSchemasError, path, additionalMsg)
+}
+func mustNotValidatechemaMsg(path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, MustNotValidateSchemaError, path)
+}
+func hasADependencyMsg(path, depkey string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, HasDependencyError, path, depkey)
+}
+func arrayDoesNotAllowAdditionalItemsMsg() errors.Error {
+	return errors.New(errors.CompositeErrorCode, ArrayDoesNotAllowAdditionalItemsError)
+}
diff --git a/vendor/github.com/go-openapi/validate/schema_option.go b/vendor/github.com/go-openapi/validate/schema_option.go
new file mode 100644
index 00000000000..d9fd21a75a1
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/schema_option.go
@@ -0,0 +1,72 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package validate
+
+// SchemaValidatorOptions defines optional rules for schema validation
+type SchemaValidatorOptions struct {
+	EnableObjectArrayTypeCheck    bool
+	EnableArrayMustHaveItemsCheck bool
+	recycleValidators             bool
+	recycleResult                 bool
+	skipSchemataResult            bool
+}
+
+// Option sets optional rules for schema validation
+type Option func(*SchemaValidatorOptions)
+
+// EnableObjectArrayTypeCheck activates the swagger rule: a schema with an items definition must have type: array
+func EnableObjectArrayTypeCheck(enable bool) Option {
+	return func(svo *SchemaValidatorOptions) {
+		svo.EnableObjectArrayTypeCheck = enable
+	}
+}
+
+// EnableArrayMustHaveItemsCheck activates the swagger rule: an array must have items defined
+func EnableArrayMustHaveItemsCheck(enable bool) Option {
+	return func(svo *SchemaValidatorOptions) {
+		svo.EnableArrayMustHaveItemsCheck = enable
+	}
+}
+
+// SwaggerSchema activates swagger schema validation rules
+func SwaggerSchema(enable bool) Option {
+	return func(svo *SchemaValidatorOptions) {
+		svo.EnableObjectArrayTypeCheck = enable
+		svo.EnableArrayMustHaveItemsCheck = enable
+	}
+}
+
+// WithRecycleValidators saves memory allocations and makes validators
+// available for a single use of Validate() only.
+//
+// When a validator is recycled, callers MUST NOT call the Validate() method twice.
+func WithRecycleValidators(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.recycleValidators = enable + } +} + +func withRecycleResults(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.recycleResult = enable + } +} + +// WithSkipSchemataResult skips the deep audit payload stored in validation Result +func WithSkipSchemataResult(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.skipSchemataResult = enable + } +} + +// Options returns the current set of options +func (svo SchemaValidatorOptions) Options() []Option { + return []Option{ + EnableObjectArrayTypeCheck(svo.EnableObjectArrayTypeCheck), + EnableArrayMustHaveItemsCheck(svo.EnableArrayMustHaveItemsCheck), + WithRecycleValidators(svo.recycleValidators), + withRecycleResults(svo.recycleResult), + WithSkipSchemataResult(svo.skipSchemataResult), + } +} diff --git a/vendor/github.com/go-openapi/validate/schema_props.go b/vendor/github.com/go-openapi/validate/schema_props.go new file mode 100644 index 00000000000..485f536adc3 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/schema_props.go @@ -0,0 +1,345 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +type schemaPropsValidator struct { + Path string + In string + AllOf []spec.Schema + OneOf []spec.Schema + AnyOf []spec.Schema + Not *spec.Schema + Dependencies spec.Dependencies + anyOfValidators []*SchemaValidator + allOfValidators []*SchemaValidator + oneOfValidators []*SchemaValidator + notValidator *SchemaValidator + Root any + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func (s *schemaPropsValidator) SetPath(path string) { + s.Path = path +} + +func newSchemaPropsValidator( + path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root any, formats strfmt.Registry, + opts *SchemaValidatorOptions) *schemaPropsValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + anyValidators := make([]*SchemaValidator, 0, len(anyOf)) + for i := range anyOf { + anyValidators = append(anyValidators, newSchemaValidator(&anyOf[i], root, path, formats, opts)) + } + allValidators := make([]*SchemaValidator, 0, len(allOf)) + for i := range allOf { + allValidators = append(allValidators, newSchemaValidator(&allOf[i], root, path, formats, opts)) + } + oneValidators := make([]*SchemaValidator, 0, len(oneOf)) + for i := range oneOf { + oneValidators = append(oneValidators, newSchemaValidator(&oneOf[i], root, path, formats, opts)) + } + + var notValidator *SchemaValidator + if not != nil { + notValidator = newSchemaValidator(not, root, path, formats, opts) + } + + var s *schemaPropsValidator + if opts.recycleValidators { + s = pools.poolOfSchemaPropsValidators.BorrowValidator() + } else { + s = new(schemaPropsValidator) + } + + s.Path = path + s.In = in + s.AllOf = allOf + s.OneOf = oneOf + s.AnyOf = anyOf + s.Not = not + s.Dependencies = deps + s.anyOfValidators = anyValidators + s.allOfValidators = allValidators + s.oneOfValidators = oneValidators + s.notValidator = notValidator + s.Root = root + s.KnownFormats = formats + s.Options = opts + + return s +} + +func (s *schemaPropsValidator) Applies(source any, _ reflect.Kind) bool { + _, isSchema := source.(*spec.Schema) + return isSchema +} + +func (s *schemaPropsValidator) Validate(data any) *Result { + var 
mainResult *Result + if s.Options.recycleResult { + mainResult = pools.poolOfResults.BorrowResult() + } else { + mainResult = new(Result) + } + + // Intermediary error results + + // IMPORTANT! messages from underlying validators + var keepResultAnyOf, keepResultOneOf, keepResultAllOf *Result + + if s.Options.recycleValidators { + defer func() { + s.redeemChildren() + s.redeem() + + // results are redeemed when merged + }() + } + + if len(s.anyOfValidators) > 0 { + keepResultAnyOf = pools.poolOfResults.BorrowResult() + s.validateAnyOf(data, mainResult, keepResultAnyOf) + } + + if len(s.oneOfValidators) > 0 { + keepResultOneOf = pools.poolOfResults.BorrowResult() + s.validateOneOf(data, mainResult, keepResultOneOf) + } + + if len(s.allOfValidators) > 0 { + keepResultAllOf = pools.poolOfResults.BorrowResult() + s.validateAllOf(data, mainResult, keepResultAllOf) + } + + if s.notValidator != nil { + s.validateNot(data, mainResult) + } + + if len(s.Dependencies) > 0 && reflect.TypeOf(data).Kind() == reflect.Map { + s.validateDependencies(data, mainResult) + } + + mainResult.Inc() + + // In the end we retain best failures for schema validation + // plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!). + return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf) +} + +func (s *schemaPropsValidator) validateAnyOf(data any, mainResult, keepResultAnyOf *Result) { + // Validates at least one in anyOf schemas + var bestFailures *Result + + for i, anyOfSchema := range s.anyOfValidators { + result := anyOfSchema.Validate(data) + if s.Options.recycleValidators { + s.anyOfValidators[i] = nil + } + // We keep inner IMPORTANT! errors no matter what MatchCount tells us + keepResultAnyOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result + + if result.IsValid() { + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) + } + + _ = keepResultAnyOf.cleared() + mainResult.Merge(result) + + return + } + + // MatchCount is used to select errors from the schema with most positive checks + if bestFailures == nil || result.MatchCount > bestFailures.MatchCount { + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) + } + bestFailures = result + + continue + } + + if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched + } + } + + mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path)) + mainResult.Merge(bestFailures) +} + +func (s *schemaPropsValidator) validateOneOf(data any, mainResult, keepResultOneOf *Result) { + // Validates exactly one in oneOf schemas + var ( + firstSuccess, bestFailures *Result + validated int + ) + + for i, oneOfSchema := range s.oneOfValidators { + result := oneOfSchema.Validate(data) + if s.Options.recycleValidators { + s.oneOfValidators[i] = nil + } + + // We keep inner IMPORTANT! 
errors no matter what MatchCount tells us
+		keepResultOneOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result
+
+		if result.IsValid() {
+			validated++
+			_ = keepResultOneOf.cleared()
+
+			if firstSuccess == nil {
+				firstSuccess = result
+			} else if result.wantsRedeemOnMerge {
+				pools.poolOfResults.RedeemResult(result) // this result is ditched
+			}
+
+			continue
+		}
+
+		// MatchCount is used to select errors from the schema with most positive checks
+		if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) {
+			if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
+				pools.poolOfResults.RedeemResult(bestFailures)
+			}
+			bestFailures = result
+		} else if result.wantsRedeemOnMerge {
+			pools.poolOfResults.RedeemResult(result) // this result is ditched
+		}
+	}
+
+	switch validated {
+	case 0:
+		mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, "Found none valid"))
+		mainResult.Merge(bestFailures)
+		// firstSuccess is necessarily nil here
+	case 1:
+		mainResult.Merge(firstSuccess)
+		if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
+			pools.poolOfResults.RedeemResult(bestFailures)
+		}
+	default:
+		mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, fmt.Sprintf("Found %d valid alternatives", validated)))
+		mainResult.Merge(bestFailures)
+		if firstSuccess != nil && firstSuccess.wantsRedeemOnMerge {
+			pools.poolOfResults.RedeemResult(firstSuccess)
+		}
+	}
+}
+
+func (s *schemaPropsValidator) validateAllOf(data any, mainResult, keepResultAllOf *Result) {
+	// Validates all of allOf schemas
+	var validated int
+
+	for i, allOfSchema := range s.allOfValidators {
+		result := allOfSchema.Validate(data)
+		if s.Options.recycleValidators {
+			s.allOfValidators[i] = nil
+		}
+		// We keep inner IMPORTANT! errors no matter what MatchCount tells us
+		keepResultAllOf.Merge(result.keepRelevantErrors())
+		if result.IsValid() {
+			validated++
+		}
+		mainResult.Merge(result)
+	}
+
+	switch validated {
+	case 0:
+		mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, ". None validated"))
+	case len(s.allOfValidators):
+	default:
+		mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, ""))
+	}
+}
+
+func (s *schemaPropsValidator) validateNot(data any, mainResult *Result) {
+	result := s.notValidator.Validate(data)
+	if s.Options.recycleValidators {
+		s.notValidator = nil
+	}
+	// We keep inner IMPORTANT!
errors no matter what MatchCount tells us + if result.IsValid() { + mainResult.AddErrors(mustNotValidatechemaMsg(s.Path)) + } + if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched + } +} + +func (s *schemaPropsValidator) validateDependencies(data any, mainResult *Result) { + val := data.(map[string]any) + for key := range val { + dep, ok := s.Dependencies[key] + if !ok { + continue + } + + if dep.Schema != nil { + mainResult.Merge( + newSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options).Validate(data), + ) + continue + } + + if len(dep.Property) > 0 { + for _, depKey := range dep.Property { + if _, ok := val[depKey]; !ok { + mainResult.AddErrors(hasADependencyMsg(s.Path, depKey)) + } + } + } + } +} + +func (s *schemaPropsValidator) redeem() { + pools.poolOfSchemaPropsValidators.RedeemValidator(s) +} + +func (s *schemaPropsValidator) redeemChildren() { + for _, v := range s.anyOfValidators { + if v == nil { + continue + } + v.redeemChildren() + v.redeem() + } + s.anyOfValidators = nil + + for _, v := range s.allOfValidators { + if v == nil { + continue + } + v.redeemChildren() + v.redeem() + } + s.allOfValidators = nil + + for _, v := range s.oneOfValidators { + if v == nil { + continue + } + v.redeemChildren() + v.redeem() + } + s.oneOfValidators = nil + + if s.notValidator != nil { + s.notValidator.redeemChildren() + s.notValidator.redeem() + s.notValidator = nil + } +} diff --git a/vendor/github.com/go-openapi/validate/slice_validator.go b/vendor/github.com/go-openapi/validate/slice_validator.go new file mode 100644 index 00000000000..4a5a2089687 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/slice_validator.go @@ -0,0 +1,139 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +type schemaSliceValidator struct { + Path string + In string + MaxItems *int64 + MinItems *int64 + UniqueItems bool + AdditionalItems *spec.SchemaOrBool + Items *spec.SchemaOrArray + Root any + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func newSliceValidator(path, in string, + maxItems, minItems *int64, uniqueItems bool, + additionalItems *spec.SchemaOrBool, items *spec.SchemaOrArray, + root any, formats strfmt.Registry, opts *SchemaValidatorOptions) *schemaSliceValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var v *schemaSliceValidator + if opts.recycleValidators { + v = pools.poolOfSliceValidators.BorrowValidator() + } else { + v = new(schemaSliceValidator) + } + + v.Path = path + v.In = in + v.MaxItems = maxItems + v.MinItems = minItems + v.UniqueItems = uniqueItems + v.AdditionalItems = additionalItems + v.Items = items + v.Root = root + v.KnownFormats = formats + v.Options = opts + + return v +} + +func (s *schemaSliceValidator) SetPath(path string) { + s.Path = path +} + +func (s *schemaSliceValidator) Applies(source any, kind reflect.Kind) bool { + _, ok := source.(*spec.Schema) + r := ok && kind == reflect.Slice + return r +} + +func (s *schemaSliceValidator) Validate(data any) *Result { + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } + + var result *Result + if s.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + if data == nil { + return result + } + val := reflect.ValueOf(data) + size := val.Len() 
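The loops that follow apply the item schema per index, extending the path with the element position ("%s.%d"). A hedged usage sketch exercising this slice validator through the public constructor defined earlier in this diff (the "tags" path and the sample data are made up; error message texts are not asserted):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	// An array schema: items must be strings, and at least two are required.
	schema := spec.ArrayProperty(spec.StringProperty())
	minItems := int64(2)
	schema.MinItems = &minItems

	v := validate.NewSchemaValidator(schema, nil, "tags", strfmt.Default)

	// One item too few, and that item has the wrong type: both failures
	// are collected, the type error carrying the per-index path "tags.0".
	res := v.Validate([]any{42})
	for _, err := range res.Errors {
		fmt.Println(err)
	}
}
```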
+
+	if s.Items != nil && s.Items.Schema != nil {
+		for i := range size {
+			validator := newSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, s.Options)
+			validator.SetPath(fmt.Sprintf("%s.%d", s.Path, i))
+			value := val.Index(i)
+			result.mergeForSlice(val, i, validator.Validate(value.Interface()))
+		}
+	}
+
+	itemsSize := 0
+	if s.Items != nil && len(s.Items.Schemas) > 0 {
+		itemsSize = len(s.Items.Schemas)
+		for i := range itemsSize {
+			if size <= i {
+				break
+			}
+
+			validator := newSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options)
+			result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface()))
+		}
+	}
+	if s.AdditionalItems != nil && itemsSize < size {
+		if s.Items != nil && len(s.Items.Schemas) > 0 && !s.AdditionalItems.Allows {
+			result.AddErrors(arrayDoesNotAllowAdditionalItemsMsg())
+		}
+		if s.AdditionalItems.Schema != nil {
+			for i := itemsSize; i < size-itemsSize+1; i++ {
+				validator := newSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options)
+				result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface()))
+			}
+		}
+	}
+
+	if s.MinItems != nil {
+		if err := MinItems(s.Path, s.In, int64(size), *s.MinItems); err != nil {
+			result.AddErrors(err)
+		}
+	}
+	if s.MaxItems != nil {
+		if err := MaxItems(s.Path, s.In, int64(size), *s.MaxItems); err != nil {
+			result.AddErrors(err)
+		}
+	}
+	if s.UniqueItems {
+		if err := UniqueItems(s.Path, s.In, val.Interface()); err != nil {
+			result.AddErrors(err)
+		}
+	}
+	result.Inc()
+	return result
+}
+
+func (s *schemaSliceValidator) redeem() {
+	pools.poolOfSliceValidators.RedeemValidator(s)
+}
diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go
new file mode 100644
index 00000000000..8616a861f28
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/spec.go
@@ -0,0 +1,845 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package validate
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"fmt"
+	"slices"
+	"sort"
+	"strings"
+
+	"github.com/go-openapi/analysis"
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/loads"
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag/jsonutils"
+)
+
+// Spec validates an OpenAPI 2.0 specification document.
+//
+// It returns a single standard error that flattens all validation messages.
+//
+// - TODO: $ref should not have siblings
+// - TODO: make sure documentation reflects all checks and warnings
+// - TODO: check on discriminators
+// - TODO: explicit message on unsupported keywords (better than "forbidden property"...)
+// - TODO: full list of unresolved refs
+// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples
+// - TODO: option to determine if we validate for go-swagger or in a more general context
+// - TODO: check on required properties to support anyOf, allOf, oneOf
+//
+// NOTE: SecurityScopes are maps: no need to check uniqueness
+func Spec(doc *loads.Document, formats strfmt.Registry) error {
+	errs, _ /*warns*/ := NewSpecValidator(doc.Schema(), formats).Validate(doc)
+	if errs.HasErrors() {
+		return errors.CompositeValidationError(errs.Errors...)
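Together with the loads package, the typical call sequence for this entry point looks like the following hedged sketch ("swagger.json" is a placeholder path to a Swagger 2.0 document):

```go
package main

import (
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	// Load and parse the spec document from disk.
	doc, err := loads.Spec("swagger.json")
	if err != nil {
		log.Fatal(err)
	}
	// Spec flattens every validation failure into one composite error.
	if err := validate.Spec(doc, strfmt.Default); err != nil {
		log.Fatal(err)
	}
	log.Println("spec is valid")
}
```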
+	}
+	return nil
+}
+
+// SpecValidator validates a swagger 2.0 spec
+type SpecValidator struct {
+	schema        *spec.Schema // swagger 2.0 schema
+	spec          *loads.Document
+	analyzer      *analysis.Spec
+	expanded      *loads.Document
+	KnownFormats  strfmt.Registry
+	Options       Opts // validation options
+	schemaOptions *SchemaValidatorOptions
+}
+
+// NewSpecValidator creates a new swagger spec validator instance
+func NewSpecValidator(schema *spec.Schema, formats strfmt.Registry) *SpecValidator {
+	// schema options that apply to all called validators
+	schemaOptions := new(SchemaValidatorOptions)
+	for _, o := range []Option{
+		SwaggerSchema(true),
+		WithRecycleValidators(true),
+		// withRecycleResults(true),
+	} {
+		o(schemaOptions)
+	}
+
+	return &SpecValidator{
+		schema:        schema,
+		KnownFormats:  formats,
+		Options:       defaultOpts,
+		schemaOptions: schemaOptions,
+	}
+}
+
+// Validate validates the swagger spec
+func (s *SpecValidator) Validate(data any) (*Result, *Result) {
+	s.schemaOptions.skipSchemataResult = s.Options.SkipSchemataResult
+	var sd *loads.Document
+	errs, warnings := new(Result), new(Result)
+
+	if v, ok := data.(*loads.Document); ok {
+		sd = v
+	}
+	if sd == nil {
+		errs.AddErrors(invalidDocumentMsg())
+		return errs, warnings // no point in continuing
+	}
+	s.spec = sd
+	s.analyzer = analysis.New(sd.Spec())
+
+	// Raw spec unmarshalling errors
+	var obj any
+	if err := json.Unmarshal(sd.Raw(), &obj); err != nil {
+		// NOTE: under normal conditions, the *loads.Document has already been unmarshalled,
+		// so this is just a paranoid check on the behavior of the spec package
+		panic(InvalidDocumentError)
+	}
+
+	defer func() {
+		// errs holds all errors and warnings;
+		// warnings holds only the warnings
+		errs.MergeAsWarnings(warnings)
+		warnings.AddErrors(errs.Warnings...)
+	}()
+
+	// Swagger schema validator
+	schv := newSchemaValidator(s.schema, nil, "", s.KnownFormats, s.schemaOptions)
+	errs.Merge(schv.Validate(obj)) // error -
+	// There may be a point in continuing to try and determine more accurate errors
+	if !s.Options.ContinueOnErrors && errs.HasErrors() {
+		return errs, warnings // no point in continuing
+	}
+
+	errs.Merge(s.validateReferencesValid()) // error -
+	// There may be a point in continuing to try and determine more accurate errors
+	if !s.Options.ContinueOnErrors && errs.HasErrors() {
+		return errs, warnings // no point in continuing
+	}
+
+	errs.Merge(s.validateDuplicateOperationIDs())
+	errs.Merge(s.validateDuplicatePropertyNames()) // error -
+	errs.Merge(s.validateParameters())             // error -
+	errs.Merge(s.validateItems())                  // error -
+
+	// Properties in required definition MUST validate their schema
+	// Properties SHOULD NOT be declared as both required and readOnly (warning)
+	errs.Merge(s.validateRequiredDefinitions()) // error and warning
+
+	// There may be a point in continuing to try and determine more accurate errors
+	if !s.Options.ContinueOnErrors && errs.HasErrors() {
+		return errs, warnings // no point in continuing
+	}
+
+	// Values provided as default MUST validate their schema
+	df := &defaultValidator{SpecValidator: s, schemaOptions: s.schemaOptions}
+	errs.Merge(df.Validate())
+
+	// Values provided as examples MUST validate their schema
+	// Values provided as examples in a response without a schema generate a warning
+	// Known limitation: examples in responses for mime types other than application/json are ignored (warning)
+	ex := &exampleValidator{SpecValidator: s, schemaOptions: s.schemaOptions}
+	errs.Merge(ex.Validate())
+
+	errs.Merge(s.validateNonEmptyPathParamNames())
+
+	// errs.Merge(s.validateRefNoSibling()) // warning only
+	errs.Merge(s.validateReferenced()) // warning only
+
+	return errs, warnings
+}
+
+// SetContinueOnErrors sets the ContinueOnErrors option for this validator.
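Driving the validator directly, rather than through Spec, yields the errors and the warnings as separate results. A hedged sketch using the SetContinueOnErrors knob defined just below ("swagger.json" is again a placeholder path):

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	doc, err := loads.Spec("swagger.json")
	if err != nil {
		log.Fatal(err)
	}

	v := validate.NewSpecValidator(doc.Schema(), strfmt.Default)
	v.SetContinueOnErrors(true) // keep collecting after the first error

	// Validate returns (errors, warnings); both are *validate.Result.
	errs, warnings := v.Validate(doc)
	for _, e := range errs.Errors {
		fmt.Println("error:", e)
	}
	for _, w := range warnings.Warnings {
		fmt.Println("warning:", w)
	}
}
```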
+func (s *SpecValidator) SetContinueOnErrors(c bool) {
+	s.Options.ContinueOnErrors = c
+}
+
+func (s *SpecValidator) validateNonEmptyPathParamNames() *Result {
+	res := pools.poolOfResults.BorrowResult()
+	if s.spec.Spec().Paths == nil {
+		// There is no Paths object: error
+		res.AddErrors(noValidPathMsg())
+
+		return res
+	}
+
+	if s.spec.Spec().Paths.Paths == nil {
+		// Paths may be empty: warning
+		res.AddWarnings(noValidPathMsg())
+
+		return res
+	}
+
+	for k := range s.spec.Spec().Paths.Paths {
+		if strings.Contains(k, "{}") {
+			res.AddErrors(emptyPathParameterMsg(k))
+		}
+	}
+
+	return res
+}
+
+func (s *SpecValidator) validateDuplicateOperationIDs() *Result {
+	// OperationID, if specified, must be unique across the board
+	var analyzer *analysis.Spec
+	if s.expanded != nil {
+		// $ref are valid: we can analyze operations on an expanded spec
+		analyzer = analysis.New(s.expanded.Spec())
+	} else {
+		// fallback on possible incomplete picture because of previous errors
+		analyzer = s.analyzer
+	}
+	res := pools.poolOfResults.BorrowResult()
+	known := make(map[string]int)
+	for _, v := range analyzer.OperationIDs() {
+		if v != "" {
+			known[v]++
+		}
+	}
+	for k, v := range known {
+		if v > 1 {
+			res.AddErrors(nonUniqueOperationIDMsg(k, v))
+		}
+	}
+	return res
+}
+
+type dupProp struct {
+	Name       string
+	Definition string
+}
+
+func (s *SpecValidator) validateDuplicatePropertyNames() *Result {
+	// definition can't declare a property that's already defined by one of its ancestors
+	res := pools.poolOfResults.BorrowResult()
+	for k, sch := range s.spec.Spec().Definitions {
+		if len(sch.AllOf) == 0 {
+			continue
+		}
+
+		knownanc := map[string]struct{}{
+			"#/definitions/" + k: {},
+		}
+
+		ancs, rec := s.validateCircularAncestry(k, sch, knownanc)
+		if rec != nil && (rec.HasErrors() || !rec.HasWarnings()) {
+			res.Merge(rec)
+		}
+		if len(ancs) > 0 {
+			res.AddErrors(circularAncestryDefinitionMsg(k, ancs))
+			return res
+		}
+
+		knowns := make(map[string]struct{})
+		dups, rep := s.validateSchemaPropertyNames(k, sch, knowns)
+		if rep != nil && (rep.HasErrors() || rep.HasWarnings()) {
+			res.Merge(rep)
+		}
+		if len(dups) > 0 {
+			var pns []string
+			for _, v := range dups {
+				pns = append(pns, v.Definition+"."+v.Name)
+			}
+			res.AddErrors(duplicatePropertiesMsg(k, pns))
+		}
+
+	}
+	return res
+}
+
+func (s *SpecValidator) resolveRef(ref *spec.Ref) (*spec.Schema, error) {
+	if s.spec.SpecFilePath() != "" {
+		return spec.ResolveRefWithBase(s.spec.Spec(), ref, &spec.ExpandOptions{RelativeBase: s.spec.SpecFilePath()})
+	}
+	// NOTE: it looks like with the new spec resolver, this code is now unreachable
+	return spec.ResolveRef(s.spec.Spec(), ref)
+}
+
+func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema, knowns map[string]struct{}) ([]dupProp, *Result) {
+	var dups []dupProp
+
+	schn := nm
+	schc := &sch
+	res := pools.poolOfResults.BorrowResult()
+
+	for schc.Ref.String() != "" {
+		// gather property names
+		reso, err := s.resolveRef(&schc.Ref)
+		if err != nil {
+			errorHelp.addPointerError(res, err, schc.Ref.String(), nm)
+			return dups, res
+		}
+		schc = reso
+		schn = sch.Ref.String()
+	}
+
+	if len(schc.AllOf) > 0 {
+		for _, chld := range schc.AllOf {
+			dup, rep := s.validateSchemaPropertyNames(schn, chld, knowns)
+			if rep != nil && (rep.HasErrors() || rep.HasWarnings()) {
+				res.Merge(rep)
+			}
+			dups = append(dups, dup...)
+ } + return dups, res + } + + for k := range schc.Properties { + _, ok := knowns[k] + if ok { + dups = append(dups, dupProp{Name: k, Definition: schn}) + } else { + knowns[k] = struct{}{} + } + } + + return dups, res +} + +func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, knowns map[string]struct{}) ([]string, *Result) { + res := pools.poolOfResults.BorrowResult() + + if sch.Ref.String() == "" && len(sch.AllOf) == 0 { // Safeguard. We should not be able to actually get there + return nil, res + } + var ancs []string + + schn := nm + schc := &sch + + for schc.Ref.String() != "" { + reso, err := s.resolveRef(&schc.Ref) + if err != nil { + errorHelp.addPointerError(res, err, schc.Ref.String(), nm) + return ancs, res + } + schc = reso + schn = sch.Ref.String() + } + + if schn != nm && schn != "" { + if _, ok := knowns[schn]; ok { + ancs = append(ancs, schn) + } + knowns[schn] = struct{}{} + + if len(ancs) > 0 { + return ancs, res + } + } + + if len(schc.AllOf) > 0 { + for _, chld := range schc.AllOf { + if chld.Ref.String() != "" || len(chld.AllOf) > 0 { + anc, rec := s.validateCircularAncestry(schn, chld, knowns) + if rec != nil && (rec.HasErrors() || !rec.HasWarnings()) { + res.Merge(rec) + } + ancs = append(ancs, anc...) + if len(ancs) > 0 { + return ancs, res + } + } + } + } + return ancs, res +} + +func (s *SpecValidator) validateItems() *Result { + // validate parameter, items, schema and response objects for presence of item if type is array + res := pools.poolOfResults.BorrowResult() + + for method, pi := range s.analyzer.Operations() { + for path, op := range pi { + for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { + + if param.TypeName() == arrayType && param.ItemsTypeName() == "" { + res.AddErrors(arrayInParamRequiresItemsMsg(param.Name, op.ID)) + continue + } + if param.In != swaggerBody { + if param.Items != nil { + items := param.Items + for items.TypeName() == arrayType { + if items.ItemsTypeName() == "" { + res.AddErrors(arrayInParamRequiresItemsMsg(param.Name, op.ID)) + break + } + items = items.Items + } + } + } else { + // In: body + if param.Schema != nil { + res.Merge(s.validateSchemaItems(*param.Schema, fmt.Sprintf("body param %q", param.Name), op.ID)) + } + } + } + + var responses []spec.Response + if op.Responses != nil { + if op.Responses.Default != nil { + responses = append(responses, *op.Responses.Default) + } + if op.Responses.StatusCodeResponses != nil { + for _, v := range op.Responses.StatusCodeResponses { + responses = append(responses, v) + } + } + } + + for _, resp := range responses { + // Response headers with array + for hn, hv := range resp.Headers { + if hv.TypeName() == arrayType && hv.ItemsTypeName() == "" { + res.AddErrors(arrayInHeaderRequiresItemsMsg(hn, op.ID)) + } + } + if resp.Schema != nil { + res.Merge(s.validateSchemaItems(*resp.Schema, "response body", op.ID)) + } + } + } + } + return res +} + +// Verifies constraints on array type +func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID string) *Result { + res := pools.poolOfResults.BorrowResult() + if !schema.Type.Contains(arrayType) { + return res + } + + if schema.Items == nil || schema.Items.Len() == 0 { + res.AddErrors(arrayRequiresItemsMsg(prefix, opID)) + return res + } + + if schema.Items.Schema != nil { + schema = *schema.Items.Schema + if _, err := compileRegexp(schema.Pattern); err != nil { + res.AddErrors(invalidItemsPatternMsg(prefix, opID, schema.Pattern)) + } + + 
res.Merge(s.validateSchemaItems(schema, prefix, opID))
+	}
+	return res
+}
+
+func (s *SpecValidator) validatePathParamPresence(path string, fromPath, fromOperation []string) *Result {
+	// Each defined operation path parameter must correspond to a named element in the API's path pattern.
+	// (For example, you cannot have a path parameter named id for the following path /pets/{petId} but you must have a path parameter named petId.)
+	res := pools.poolOfResults.BorrowResult()
+	for _, l := range fromPath {
+		var matched bool
+		for _, r := range fromOperation {
+			if l == "{"+r+"}" {
+				matched = true
+				break
+			}
+		}
+		if !matched {
+			res.AddErrors(noParameterInPathMsg(l))
+		}
+	}
+
+	for _, p := range fromOperation {
+		var matched bool
+		if slices.Contains(fromPath, "{"+p+"}") {
+			matched = true
+		}
+		if !matched {
+			res.AddErrors(pathParamNotInPathMsg(path, p))
+		}
+	}
+
+	return res
+}
+
+func (s *SpecValidator) validateReferenced() *Result {
+	var res Result
+	res.MergeAsWarnings(s.validateReferencedParameters())
+	res.MergeAsWarnings(s.validateReferencedResponses())
+	res.MergeAsWarnings(s.validateReferencedDefinitions())
+	return &res
+}
+
+func (s *SpecValidator) validateReferencedParameters() *Result {
+	// Each referenceable definition should have references.
+	params := s.spec.Spec().Parameters
+	if len(params) == 0 {
+		return nil
+	}
+
+	expected := make(map[string]struct{})
+	for k := range params {
+		expected["#/parameters/"+jsonpointer.Escape(k)] = struct{}{}
+	}
+	for _, k := range s.analyzer.AllParameterReferences() {
+		delete(expected, k)
+	}
+
+	if len(expected) == 0 {
+		return nil
+	}
+	result := pools.poolOfResults.BorrowResult()
+	for k := range expected {
+		result.AddWarnings(unusedParamMsg(k))
+	}
+	return result
+}
+
+func (s *SpecValidator) validateReferencedResponses() *Result {
+	// Each referenceable definition should have references.
+	responses := s.spec.Spec().Responses
+	if len(responses) == 0 {
+		return nil
+	}
+
+	expected := make(map[string]struct{})
+	for k := range responses {
+		expected["#/responses/"+jsonpointer.Escape(k)] = struct{}{}
+	}
+	for _, k := range s.analyzer.AllResponseReferences() {
+		delete(expected, k)
+	}
+
+	if len(expected) == 0 {
+		return nil
+	}
+	result := pools.poolOfResults.BorrowResult()
+	for k := range expected {
+		result.AddWarnings(unusedResponseMsg(k))
+	}
+	return result
+}
+
+func (s *SpecValidator) validateReferencedDefinitions() *Result {
+	// Each referenceable definition must have references.
+	defs := s.spec.Spec().Definitions
+	if len(defs) == 0 {
+		return nil
+	}
+
+	expected := make(map[string]struct{})
+	for k := range defs {
+		expected["#/definitions/"+jsonpointer.Escape(k)] = struct{}{}
+	}
+	for _, k := range s.analyzer.AllDefinitionReferences() {
+		delete(expected, k)
+	}
+
+	if len(expected) == 0 {
+		return nil
+	}
+
+	result := new(Result)
+	for k := range expected {
+		result.AddWarnings(unusedDefinitionMsg(k))
+	}
+	return result
+}
+
+func (s *SpecValidator) validateRequiredDefinitions() *Result {
+	// Each property listed in the required array must be defined in the properties of the model
+	res := pools.poolOfResults.BorrowResult()
+
+DEFINITIONS:
+	for d, schema := range s.spec.Spec().Definitions {
+		if schema.Required != nil { // Safeguard
+			for _, pn := range schema.Required {
+				red := s.validateRequiredProperties(pn, d, &schema) //#nosec
+				res.Merge(red)
+				if !red.IsValid() && !s.Options.ContinueOnErrors {
+					break DEFINITIONS // there is an error, let's stop that bleeding
+				}
+			}
+		}
+	}
+	return res
+}
+
+func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Schema) *Result {
+	// Takes care of recursive property definitions, which may be nested in additionalProperties schemas
+	res := pools.poolOfResults.BorrowResult()
+	propertyMatch := false
+	patternMatch := false
+	additionalPropertiesMatch := false
+	isReadOnly := false
+
+	// Regular properties
+	if _, ok := v.Properties[path]; ok {
+		propertyMatch = true
+		isReadOnly = v.Properties[path].ReadOnly
+	}
+
+	// NOTE: patternProperties are not supported in swagger. Even so, we continue validation here.
+	// We check all defined patterns: if one regexp is invalid, it croaks an error
+	for pp, pv := range v.PatternProperties {
+		re, err := compileRegexp(pp)
+		if err != nil {
+			res.AddErrors(invalidPatternMsg(pp, in))
+		} else if re.MatchString(path) {
+			patternMatch = true
+			if !propertyMatch {
+				isReadOnly = pv.ReadOnly
+			}
+		}
+	}
+
+	if !propertyMatch && !patternMatch {
+		if v.AdditionalProperties != nil {
+			if v.AdditionalProperties.Allows && v.AdditionalProperties.Schema == nil {
+				additionalPropertiesMatch = true
+			} else if v.AdditionalProperties.Schema != nil {
+				// additionalProperties as schema are supported in swagger
+				// recursively validates additionalProperties schema
+				// TODO: anyOf, allOf, oneOf like in schemaPropsValidator
+				red := s.validateRequiredProperties(path, in, v.AdditionalProperties.Schema)
+				if red.IsValid() {
+					additionalPropertiesMatch = true
+					if !propertyMatch && !patternMatch {
+						isReadOnly = v.AdditionalProperties.Schema.ReadOnly
+					}
+				}
+				res.Merge(red)
+			}
+		}
+	}
+
+	if !propertyMatch && !patternMatch && !additionalPropertiesMatch {
+		res.AddErrors(requiredButNotDefinedMsg(path, in))
+	}
+
+	if isReadOnly {
+		res.AddWarnings(readOnlyAndRequiredMsg(in, path))
+	}
+	return res
+}
+
+func (s *SpecValidator) validateParameters() *Result {
+	// - for each method, path is unique, regardless of path parameters
+	//   e.g. GET:/petstore/{id}, GET:/petstore/{pet}, GET:/petstore are
+	//   considered duplicate paths, if StrictPathParamUniqueness is enabled.
+	// - each parameter should have a unique `name` and `type` combination
+	// - each operation should have only 1 parameter of type body
+	// - there must be at most 1 parameter in body
+	// - parameters with pattern property must specify valid patterns
+	// - $ref in parameters must resolve
+	// - path param must be required
+	res := pools.poolOfResults.BorrowResult()
+	rexGarbledPathSegment := mustCompileRegexp(`.*[{}\s]+.*`)
+	for method, pi := range s.expandedAnalyzer().Operations() {
+		methodPaths := make(map[string]map[string]string)
+		for path, op := range pi {
+			if s.Options.StrictPathParamUniqueness {
+				pathToAdd := pathHelp.stripParametersInPath(path)
+
+				// Warn on garbled path after param stripping
+				if rexGarbledPathSegment.MatchString(pathToAdd) {
+					res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd))
+				}
+
+				// Check uniqueness of stripped paths
+				if _, found := methodPaths[method][pathToAdd]; found {
+
+					// Sort names for stable, testable output
+					if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 {
+						res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd]))
+					} else {
+						res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path))
+					}
+				} else {
+					if _, found := methodPaths[method]; !found {
+						methodPaths[method] = map[string]string{}
+					}
+					methodPaths[method][pathToAdd] = path // Original, non-stripped path
+
+				}
+			}
+
+			var bodyParams []string
+			var paramNames []string
+			var hasForm, hasBody bool
+
+			// Check parameter name uniqueness for the operation
+			// TODO: should be done after param expansion
+			res.Merge(s.checkUniqueParams(path, method, op))
+
+			// pick the root schema from the swagger specification which describes a parameter
+			origSchema, ok := s.schema.Definitions["parameter"]
+			if !ok {
+				panic("unexpected swagger schema: missing #/definitions/parameter")
+			}
+			// clone it once to avoid expanding a global schema (e.g.
swagger spec) + paramSchema, err := deepCloneSchema(origSchema) + if err != nil { + panic(fmt.Errorf("can't clone schema: %w", err)) + } + + for _, pr := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { + // An expanded parameter must validate the Parameter schema (an unexpanded $ref always passes high-level schema validation) + schv := newSchemaValidator(¶mSchema, s.schema, fmt.Sprintf("%s.%s.parameters.%s", path, method, pr.Name), s.KnownFormats, s.schemaOptions) + var obj any + if err := jsonutils.FromDynamicJSON(pr, &obj); err != nil { + res.AddErrors(err) + + return res + } + + res.Merge(schv.Validate(obj)) + + // Validate pattern regexp for parameters with a Pattern property + if _, err := compileRegexp(pr.Pattern); err != nil { + res.AddErrors(invalidPatternInParamMsg(op.ID, pr.Name, pr.Pattern)) + } + + // There must be at most one parameter in body: list them all + if pr.In == swaggerBody { + bodyParams = append(bodyParams, fmt.Sprintf("%q", pr.Name)) + hasBody = true + } + + if pr.In == "path" { + paramNames = append(paramNames, pr.Name) + // Path declared in path must have the required: true property + if !pr.Required { + res.AddErrors(pathParamRequiredMsg(op.ID, pr.Name)) + } + } + + if pr.In == "formData" { + hasForm = true + } + + if pr.Type != numberType && pr.Type != integerType && + (pr.Maximum != nil || pr.Minimum != nil || pr.MultipleOf != nil) { + // A non-numeric parameter has validation keywords for numeric instances (number and integer) + res.AddWarnings(parameterValidationTypeMismatchMsg(pr.Name, path, pr.Type)) + } + + if pr.Type != stringType && + // A non-string parameter has validation keywords for strings + (pr.MaxLength != nil || pr.MinLength != nil || pr.Pattern != "") { + res.AddWarnings(parameterValidationTypeMismatchMsg(pr.Name, path, pr.Type)) + } + + if pr.Type != arrayType && + // A non-array parameter has validation keywords for arrays + (pr.MaxItems != nil || pr.MinItems != nil || pr.UniqueItems) { + res.AddWarnings(parameterValidationTypeMismatchMsg(pr.Name, path, pr.Type)) + } + } + + // In:formData and In:body are mutually exclusive + if hasBody && hasForm { + res.AddErrors(bothFormDataAndBodyMsg(op.ID)) + } + // There must be at most one body param + // Accurately report situations when more than 1 body param is declared (possibly unnamed) + if len(bodyParams) > 1 { + sort.Strings(bodyParams) + res.AddErrors(multipleBodyParamMsg(op.ID, bodyParams)) + } + + // Check uniqueness of parameters in path + paramsInPath := pathHelp.extractPathParams(path) + for i, p := range paramsInPath { + for j, q := range paramsInPath { + if p == q && i > j { + res.AddErrors(pathParamNotUniqueMsg(path, p, q)) + break + } + } + } + + // Warns about possible malformed params in path + rexGarbledParam := mustCompileRegexp(`{.*[{}\s]+.*}`) + for _, p := range paramsInPath { + if rexGarbledParam.MatchString(p) { + res.AddWarnings(pathParamGarbledMsg(path, p)) + } + } + + // Match params from path vs params from params section + res.Merge(s.validatePathParamPresence(path, paramsInPath, paramNames)) + } + } + return res +} + +func (s *SpecValidator) validateReferencesValid() *Result { + // each reference must point to a valid object + res := pools.poolOfResults.BorrowResult() + for _, r := range s.analyzer.AllRefs() { + if !r.IsValidURI(s.spec.SpecFilePath()) { // Safeguard - spec should always yield a valid URI + res.AddErrors(invalidRefMsg(r.String())) + } + } + if !res.HasErrors() { + // NOTE: with default settings, loads.Document.Expanded() + // stops 
on first error. Anyhow, the expand option to continue
+		// on errors fails to report errors at all.
+		exp, err := s.spec.Expanded()
+		if err != nil {
+			res.AddErrors(unresolvedReferencesMsg(err))
+		}
+		s.expanded = exp
+	}
+	return res
+}
+
+func (s *SpecValidator) checkUniqueParams(path, method string, op *spec.Operation) *Result {
+	// Check for duplicate parameters declaration in param section.
+	// Each parameter should have a unique `name` and `type` combination
+	// NOTE: this could be factorized in analysis (when constructing the params map)
+	// However, there are some issues with such a factorization:
+	// - analysis does not seem to fully expand params
+	// - param keys may be altered by x-go-name
+	res := pools.poolOfResults.BorrowResult()
+	pnames := make(map[string]struct{})
+
+	if op.Parameters != nil { // Safeguard
+		for _, ppr := range op.Parameters {
+			var ok bool
+			pr, red := paramHelp.resolveParam(path, method, op.ID, &ppr, s) //#nosec
+			res.Merge(red)
+
+			if pr != nil && pr.Name != "" { // params with an empty name do not participate in the check
+				key := fmt.Sprintf("%s#%s", pr.In, pr.Name)
+
+				if _, ok = pnames[key]; ok {
+					res.AddErrors(duplicateParamNameMsg(pr.In, pr.Name, op.ID))
+				}
+				pnames[key] = struct{}{}
+			}
+		}
+	}
+	return res
+}
+
+// expandedAnalyzer returns expanded.Analyzer when it is available,
+// otherwise just analyzer.
+func (s *SpecValidator) expandedAnalyzer() *analysis.Spec {
+	if s.expanded != nil && s.expanded.Analyzer != nil {
+		return s.expanded.Analyzer
+	}
+	return s.analyzer
+}
+
+func deepCloneSchema(src spec.Schema) (spec.Schema, error) {
+	var b bytes.Buffer
+	if err := gob.NewEncoder(&b).Encode(src); err != nil {
+		return spec.Schema{}, err
+	}
+
+	var dst spec.Schema
+	if err := gob.NewDecoder(&b).Decode(&dst); err != nil {
+		return spec.Schema{}, err
+	}
+
+	return dst, nil
+}
diff --git a/vendor/github.com/go-openapi/validate/spec_messages.go b/vendor/github.com/go-openapi/validate/spec_messages.go
new file mode 100644
index 00000000000..9b079af647a
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/spec_messages.go
@@ -0,0 +1,355 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package validate
+
+import (
+	"net/http"
+
+	"github.com/go-openapi/errors"
+)
+
+// Error messages related to spec validation and returned as results.
+const (
+	// ArrayRequiresItemsError ...
+	ArrayRequiresItemsError = "%s for %q is a collection without an element type (array requires items definition)"
+
+	// ArrayInParamRequiresItemsError ...
+	ArrayInParamRequiresItemsError = "param %q for %q is a collection without an element type (array requires item definition)"
+
+	// ArrayInHeaderRequiresItemsError ...
+	ArrayInHeaderRequiresItemsError = "header %q for %q is a collection without an element type (array requires items definition)"
+
+	// BothFormDataAndBodyError indicates that an operation specifies both a body and a formData parameter, which is forbidden
+	BothFormDataAndBodyError = "operation %q has both formData and body parameters. Only one such In: type may be used for a given operation"
+
+	// CannotResolveReferenceError when a $ref could not be resolved
+	CannotResolveReferenceError = "could not resolve reference in %s to $ref %s: %v"
+
+	// CircularAncestryDefinitionError ...
+	CircularAncestryDefinitionError = "definition %q has circular ancestry: %v"
+
+	// DefaultValueDoesNotValidateError results from an invalid default value provided
+	DefaultValueDoesNotValidateError = "default value for %s in %s does not validate its schema"
+
+	// DefaultValueItemsDoesNotValidateError results from an invalid default value provided for Items
+	DefaultValueItemsDoesNotValidateError = "default value for %s.items in %s does not validate its schema"
+
+	// DefaultValueHeaderDoesNotValidateError results from an invalid default value provided in header
+	DefaultValueHeaderDoesNotValidateError = "in operation %q, default value in header %s for %s does not validate its schema"
+
+	// DefaultValueHeaderItemsDoesNotValidateError results from an invalid default value provided in header.items
+	DefaultValueHeaderItemsDoesNotValidateError = "in operation %q, default value in header.items %s for %s does not validate its schema"
+
+	// DefaultValueInDoesNotValidateError ...
+	DefaultValueInDoesNotValidateError = "in operation %q, default value in %s does not validate its schema"
+
+	// DuplicateParamNameError ...
+	DuplicateParamNameError = "duplicate parameter name %q for %q in operation %q"
+
+	// DuplicatePropertiesError ...
+	DuplicatePropertiesError = "definition %q contains duplicate properties: %v"
+
+	// ExampleValueDoesNotValidateError results from an invalid example value provided
+	ExampleValueDoesNotValidateError = "example value for %s in %s does not validate its schema"
+
+	// ExampleValueItemsDoesNotValidateError results from an invalid example value provided for Items
+	ExampleValueItemsDoesNotValidateError = "example value for %s.items in %s does not validate its schema"
+
+	// ExampleValueHeaderDoesNotValidateError results from an invalid example value provided in header
+	ExampleValueHeaderDoesNotValidateError = "in operation %q, example value in header %s for %s does not validate its schema"
+
+	// ExampleValueHeaderItemsDoesNotValidateError results from an invalid example value provided in header.items
+	ExampleValueHeaderItemsDoesNotValidateError = "in operation %q, example value in header.items %s for %s does not validate its schema"
+
+	// ExampleValueInDoesNotValidateError ...
+	ExampleValueInDoesNotValidateError = "in operation %q, example value in %s does not validate its schema"
+
+	// EmptyPathParameterError means that a path parameter was found empty (e.g. "{}")
+	EmptyPathParameterError = "%q contains an empty path parameter"
+
+	// InvalidDocumentError states that spec validation only processes spec.Document objects
+	InvalidDocumentError = "spec validator can only validate spec.Document objects"
+
+	// InvalidItemsPatternError indicates an Items definition with invalid pattern
+	InvalidItemsPatternError = "%s for %q has invalid items pattern: %q"
+
+	// InvalidParameterDefinitionError indicates an error detected on a parameter definition
+	InvalidParameterDefinitionError = "invalid definition for parameter %s in %s in operation %q"
+
+	// InvalidParameterDefinitionAsSchemaError indicates an error detected on a parameter definition, which was mistaken for a schema definition.
+	// Most likely, this situation is encountered whenever a $ref has been added as a sibling of the parameter definition.
+	InvalidParameterDefinitionAsSchemaError = "invalid definition as Schema for parameter %s in %s in operation %q"
+
+	// InvalidPatternError ...
+	InvalidPatternError = "pattern %q is invalid in %s"
+
+	// InvalidPatternInError indicates an invalid pattern in a schema or items definition
+	InvalidPatternInError = "%s in %s has invalid pattern: %q"
+
+	// InvalidPatternInHeaderError indicates a header definition with an invalid pattern
+	InvalidPatternInHeaderError = "in operation %q, header %s for %s has invalid pattern %q: %v"
+
+	// InvalidPatternInParamError ...
+	InvalidPatternInParamError = "operation %q has invalid pattern in param %q: %q"
+
+	// InvalidReferenceError indicates that a $ref property could not be resolved
+	InvalidReferenceError = "invalid ref %q"
+
+	// InvalidResponseDefinitionAsSchemaError indicates an error detected on a response definition, which was mistaken for a schema definition.
+	// Most likely, this situation is encountered whenever a $ref has been added as a sibling of the response definition.
+	InvalidResponseDefinitionAsSchemaError = "invalid definition as Schema for response %s in %s"
+
+	// MultipleBodyParamError indicates that an operation specifies multiple parameters with in: body
+	MultipleBodyParamError = "operation %q has more than 1 body param: %v"
+
+	// NonUniqueOperationIDError indicates that the same operationId has been specified several times
+	NonUniqueOperationIDError = "%q is defined %d times"
+
+	// NoParameterInPathError indicates that a path parameter was found without any parameter definition
+	NoParameterInPathError = "path param %q has no parameter definition"
+
+	// NoValidPathErrorOrWarning indicates that no single path could be validated. If Paths is empty, this message is only a warning.
+	NoValidPathErrorOrWarning = "spec has no valid path defined"
+
+	// NoValidResponseError indicates that no valid response description could be found for an operation
+	NoValidResponseError = "operation %q has no valid response"
+
+	// PathOverlapError ...
+	PathOverlapError = "path %s overlaps with %s"
+
+	// PathParamNotInPathError indicates that a parameter specified with in: path was not found in the path specification
+	PathParamNotInPathError = "path param %q is not present in path %q"
+
+	// PathParamNotUniqueError ...
+	PathParamNotUniqueError = "params in path %q must be unique: %q conflicts with %q"
+
+	// PathParamRequiredError ...
+	PathParamRequiredError = "in operation %q, path param %q must be declared as required"
+
+	// RefNotAllowedInHeaderError indicates a $ref was found in a header definition, which is not allowed by Swagger
+	RefNotAllowedInHeaderError = "IMPORTANT! in %q: $ref are not allowed in headers. In context for header %q%s"
+
+	// RequiredButNotDefinedError ...
+	RequiredButNotDefinedError = "%q is present in required but not defined as property in definition %q"
+
+	// SomeParametersBrokenError indicates that some parameters could not be resolved, which might result in only partial checks being carried out
+	SomeParametersBrokenError = "some parameters definitions are broken in %q.%s. Cannot carry on full checks on parameters for operation %s"
+
+	// UnresolvedReferencesError indicates that at least one $ref could not be resolved
+	UnresolvedReferencesError = "some references could not be resolved in spec. First found: %v"
+)
+
+// Warning messages related to spec validation and returned as results
+const (
+	// ExamplesWithoutSchemaWarning indicates that examples are provided for a response, but no schema to validate the example against
+	ExamplesWithoutSchemaWarning = "Examples provided without schema in operation %q, %s"
+
+	// ExamplesMimeNotSupportedWarning indicates that examples are provided with a mime type different than application/json, which
+	// the validator does not support yet
+	ExamplesMimeNotSupportedWarning = "No validation attempt for examples for media types other than application/json, in operation %q, %s"
+
+	// PathParamGarbledWarning ...
+	PathParamGarbledWarning = "in path %q, param %q contains {,} or white space. Albeit not strictly illegal, this is probably not what you want"
+
+	// ParamValidationTypeMismatch indicates that a parameter has validation keywords which do not match its type
+	ParamValidationTypeMismatch = "validation keywords of parameter %q in path %q don't match its type %s"
+
+	// PathStrippedParamGarbledWarning ...
+	PathStrippedParamGarbledWarning = "path stripped from path parameters %s contains {,} or white space. This is probably not what you want."
+
+	// ReadOnlyAndRequiredWarning ...
+	ReadOnlyAndRequiredWarning = "Required property %s in %q should not be marked as both required and readOnly"
+
+	// RefShouldNotHaveSiblingsWarning indicates that a $ref was found with a sibling definition. This results in the $ref taking over its siblings,
+	// which is most likely not wanted.
+	RefShouldNotHaveSiblingsWarning = "$ref property should have no sibling in %q.%s"
+
+	// RequiredHasDefaultWarning indicates that a required parameter property should not have a default
+	RequiredHasDefaultWarning = "%s in %s has a default value and is required as parameter"
+
+	// UnusedDefinitionWarning ...
+	UnusedDefinitionWarning = "definition %q is not used anywhere"
+
+	// UnusedParamWarning ...
+	UnusedParamWarning = "parameter %q is not used anywhere"
+
+	// UnusedResponseWarning ...
+	UnusedResponseWarning = "response %q is not used anywhere"
+
+	InvalidObject = "expected an object in %q.%s"
+)
+
+// Additional error codes
+const (
+	// InternalErrorCode reports an internal technical error
+	InternalErrorCode = http.StatusInternalServerError
+	// NotFoundErrorCode indicates that a resource (e.g.
a $ref) could not be found + NotFoundErrorCode = http.StatusNotFound +) + +func invalidDocumentMsg() errors.Error { + return errors.New(InternalErrorCode, InvalidDocumentError) +} +func invalidRefMsg(path string) errors.Error { + return errors.New(NotFoundErrorCode, InvalidReferenceError, path) +} +func unresolvedReferencesMsg(err error) errors.Error { + return errors.New(errors.CompositeErrorCode, UnresolvedReferencesError, err) +} +func noValidPathMsg() errors.Error { + return errors.New(errors.CompositeErrorCode, NoValidPathErrorOrWarning) +} +func emptyPathParameterMsg(path string) errors.Error { + return errors.New(errors.CompositeErrorCode, EmptyPathParameterError, path) +} +func nonUniqueOperationIDMsg(path string, i int) errors.Error { + return errors.New(errors.CompositeErrorCode, NonUniqueOperationIDError, path, i) +} +func circularAncestryDefinitionMsg(path string, args any) errors.Error { + return errors.New(errors.CompositeErrorCode, CircularAncestryDefinitionError, path, args) +} +func duplicatePropertiesMsg(path string, args any) errors.Error { + return errors.New(errors.CompositeErrorCode, DuplicatePropertiesError, path, args) +} +func pathParamNotInPathMsg(path, param string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathParamNotInPathError, param, path) +} +func arrayRequiresItemsMsg(path, operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, ArrayRequiresItemsError, path, operation) +} +func arrayInParamRequiresItemsMsg(path, operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, ArrayInParamRequiresItemsError, path, operation) +} +func arrayInHeaderRequiresItemsMsg(path, operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, ArrayInHeaderRequiresItemsError, path, operation) +} +func invalidItemsPatternMsg(path, operation, pattern string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidItemsPatternError, path, operation, pattern) +} +func invalidPatternMsg(pattern, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidPatternError, pattern, path) +} +func requiredButNotDefinedMsg(path, definition string) errors.Error { + return errors.New(errors.CompositeErrorCode, RequiredButNotDefinedError, path, definition) +} +func pathParamGarbledMsg(path, param string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathParamGarbledWarning, path, param) +} +func pathStrippedParamGarbledMsg(path string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathStrippedParamGarbledWarning, path) +} +func pathOverlapMsg(path, arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathOverlapError, path, arg) +} +func invalidPatternInParamMsg(operation, param, pattern string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidPatternInParamError, operation, param, pattern) +} +func pathParamRequiredMsg(operation, param string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathParamRequiredError, operation, param) +} +func bothFormDataAndBodyMsg(operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, BothFormDataAndBodyError, operation) +} +func multipleBodyParamMsg(operation string, args any) errors.Error { + return errors.New(errors.CompositeErrorCode, MultipleBodyParamError, operation, args) +} +func pathParamNotUniqueMsg(path, param, arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, PathParamNotUniqueError, path, param, 
arg) +} +func duplicateParamNameMsg(path, param, operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, DuplicateParamNameError, param, path, operation) +} +func unusedParamMsg(arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, UnusedParamWarning, arg) +} +func unusedDefinitionMsg(arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, UnusedDefinitionWarning, arg) +} +func unusedResponseMsg(arg string) errors.Error { + return errors.New(errors.CompositeErrorCode, UnusedResponseWarning, arg) +} +func readOnlyAndRequiredMsg(path, param string) errors.Error { + return errors.New(errors.CompositeErrorCode, ReadOnlyAndRequiredWarning, param, path) +} +func noParameterInPathMsg(param string) errors.Error { + return errors.New(errors.CompositeErrorCode, NoParameterInPathError, param) +} +func requiredHasDefaultMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, RequiredHasDefaultWarning, param, path) +} +func defaultValueDoesNotValidateMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueDoesNotValidateError, param, path) +} +func defaultValueItemsDoesNotValidateMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueItemsDoesNotValidateError, param, path) +} +func noValidResponseMsg(operation string) errors.Error { + return errors.New(errors.CompositeErrorCode, NoValidResponseError, operation) +} +func defaultValueHeaderDoesNotValidateMsg(operation, header, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueHeaderDoesNotValidateError, operation, header, path) +} +func defaultValueHeaderItemsDoesNotValidateMsg(operation, header, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueHeaderItemsDoesNotValidateError, operation, header, path) +} +func invalidPatternInHeaderMsg(operation, header, path, pattern string, args any) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidPatternInHeaderError, operation, header, path, pattern, args) +} +func invalidPatternInMsg(path, in, pattern string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidPatternInError, path, in, pattern) +} +func defaultValueInDoesNotValidateMsg(operation, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, DefaultValueInDoesNotValidateError, operation, path) +} +func exampleValueDoesNotValidateMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueDoesNotValidateError, param, path) +} +func exampleValueItemsDoesNotValidateMsg(param, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueItemsDoesNotValidateError, param, path) +} +func exampleValueHeaderDoesNotValidateMsg(operation, header, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueHeaderDoesNotValidateError, operation, header, path) +} +func exampleValueHeaderItemsDoesNotValidateMsg(operation, header, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueHeaderItemsDoesNotValidateError, operation, header, path) +} +func exampleValueInDoesNotValidateMsg(operation, path string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExampleValueInDoesNotValidateError, operation, path) +} +func examplesWithoutSchemaMsg(operation, response string) errors.Error { + return errors.New(errors.CompositeErrorCode, 
ExamplesWithoutSchemaWarning, operation, response) +} +func examplesMimeNotSupportedMsg(operation, response string) errors.Error { + return errors.New(errors.CompositeErrorCode, ExamplesMimeNotSupportedWarning, operation, response) +} +func refNotAllowedInHeaderMsg(path, header, ref string) errors.Error { + return errors.New(errors.CompositeErrorCode, RefNotAllowedInHeaderError, path, header, ref) +} +func cannotResolveRefMsg(path, ref string, err error) errors.Error { + return errors.New(errors.CompositeErrorCode, CannotResolveReferenceError, path, ref, err) +} +func invalidParameterDefinitionMsg(path, method, operationID string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidParameterDefinitionError, path, method, operationID) +} +func invalidParameterDefinitionAsSchemaMsg(path, method, operationID string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidParameterDefinitionAsSchemaError, path, method, operationID) +} +func parameterValidationTypeMismatchMsg(param, path, typ string) errors.Error { + return errors.New(errors.CompositeErrorCode, ParamValidationTypeMismatch, param, path, typ) +} +func invalidObjectMsg(path, in string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidObject, path, in) +} + +// disabled +// +// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error { +// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method) +// } +func someParametersBrokenMsg(path, method, operationID string) errors.Error { + return errors.New(errors.CompositeErrorCode, SomeParametersBrokenError, path, method, operationID) +} +func refShouldNotHaveSiblingsMsg(path, operationID string) errors.Error { + return errors.New(errors.CompositeErrorCode, RefShouldNotHaveSiblingsWarning, operationID, path) +} diff --git a/vendor/github.com/go-openapi/validate/type.go b/vendor/github.com/go-openapi/validate/type.go new file mode 100644 index 00000000000..9b9ab8d917d --- /dev/null +++ b/vendor/github.com/go-openapi/validate/type.go @@ -0,0 +1,203 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "reflect" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/conv" + "github.com/go-openapi/swag/fileutils" +) + +type typeValidator struct { + Path string + In string + Type spec.StringOrArray + Nullable bool + Format string + Options *SchemaValidatorOptions +} + +func newTypeValidator(path, in string, typ spec.StringOrArray, nullable bool, format string, opts *SchemaValidatorOptions) *typeValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var t *typeValidator + if opts.recycleValidators { + t = pools.poolOfTypeValidators.BorrowValidator() + } else { + t = new(typeValidator) + } + + t.Path = path + t.In = in + t.Type = typ + t.Nullable = nullable + t.Format = format + t.Options = opts + + return t +} + +func (t *typeValidator) SetPath(path string) { + t.Path = path +} + +func (t *typeValidator) Applies(source any, _ reflect.Kind) bool { + // typeValidator applies to Schema, Parameter and Header objects + switch source.(type) { + case *spec.Schema: + case *spec.Parameter: + case *spec.Header: + default: + return false + } + + return (len(t.Type) > 0 || t.Format != "") +} + +func (t *typeValidator) Validate(data any) *Result { + if t.Options.recycleValidators { + defer func() { + 
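+			// This validator was borrowed from a pool (recycleValidators is set);
+			// redeem hands it back so later validations can reuse the allocation.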
t.redeem() + }() + } + + if data == nil { + // nil or zero value for the passed structure require Type: null + if len(t.Type) > 0 && !t.Type.Contains(nullType) && !t.Nullable { // TODO: if a property is not required it also passes this + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType), t.Options.recycleResult) + } + + return emptyResult + } + + // check if the type matches, should be used in every validator chain as first item + val := reflect.Indirect(reflect.ValueOf(data)) + kind := val.Kind() + + // infer schema type (JSON) and format from passed data type + schType, format := t.schemaInfoForType(data) + + // check numerical types + // TODO: check unsigned ints + // TODO: check json.Number (see schema.go) + isLowerInt := t.Format == integerFormatInt64 && format == integerFormatInt32 + isLowerFloat := t.Format == numberFormatFloat64 && format == numberFormatFloat32 + isFloatInt := schType == numberType && conv.IsFloat64AJSONInteger(val.Float()) && t.Type.Contains(integerType) + isIntFloat := schType == integerType && t.Type.Contains(numberType) + + if kind != reflect.String && kind != reflect.Slice && t.Format != "" && !t.Type.Contains(schType) && format != t.Format && !isFloatInt && !isIntFloat && !isLowerInt && !isLowerFloat { + // TODO: test case + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format), t.Options.recycleResult) + } + + if !t.Type.Contains(numberType) && !t.Type.Contains(integerType) && t.Format != "" && (kind == reflect.String || kind == reflect.Slice) { + return emptyResult + } + + if !t.Type.Contains(schType) && !isFloatInt && !isIntFloat { + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), schType), t.Options.recycleResult) + } + + return emptyResult +} + +func (t *typeValidator) schemaInfoForType(data any) (string, string) { + // internal type to JSON type with swagger 2.0 format (with go-openapi/strfmt extensions), + // see https://github.com/go-openapi/strfmt/blob/master/README.md + // TODO: this switch really is some sort of reverse lookup for formats. It should be provided by strfmt. 
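+	// For example, a strfmt.UUID value yields ("string", "uuid") below, while a
+	// plain Go int falls through to the reflect-based default and yields ("integer", "int64").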
+ switch data.(type) { + case []byte, strfmt.Base64, *strfmt.Base64: + return stringType, stringFormatByte + case strfmt.CreditCard, *strfmt.CreditCard: + return stringType, stringFormatCreditCard + case strfmt.Date, *strfmt.Date: + return stringType, stringFormatDate + case strfmt.DateTime, *strfmt.DateTime: + return stringType, stringFormatDateTime + case strfmt.Duration, *strfmt.Duration: + return stringType, stringFormatDuration + case fileutils.File, *fileutils.File: + return fileType, "" + case strfmt.Email, *strfmt.Email: + return stringType, stringFormatEmail + case strfmt.HexColor, *strfmt.HexColor: + return stringType, stringFormatHexColor + case strfmt.Hostname, *strfmt.Hostname: + return stringType, stringFormatHostname + case strfmt.IPv4, *strfmt.IPv4: + return stringType, stringFormatIPv4 + case strfmt.IPv6, *strfmt.IPv6: + return stringType, stringFormatIPv6 + case strfmt.ISBN, *strfmt.ISBN: + return stringType, stringFormatISBN + case strfmt.ISBN10, *strfmt.ISBN10: + return stringType, stringFormatISBN10 + case strfmt.ISBN13, *strfmt.ISBN13: + return stringType, stringFormatISBN13 + case strfmt.MAC, *strfmt.MAC: + return stringType, stringFormatMAC + case strfmt.ObjectId, *strfmt.ObjectId: + return stringType, stringFormatBSONObjectID + case strfmt.Password, *strfmt.Password: + return stringType, stringFormatPassword + case strfmt.RGBColor, *strfmt.RGBColor: + return stringType, stringFormatRGBColor + case strfmt.SSN, *strfmt.SSN: + return stringType, stringFormatSSN + case strfmt.URI, *strfmt.URI: + return stringType, stringFormatURI + case strfmt.UUID, *strfmt.UUID: + return stringType, stringFormatUUID + case strfmt.UUID3, *strfmt.UUID3: + return stringType, stringFormatUUID3 + case strfmt.UUID4, *strfmt.UUID4: + return stringType, stringFormatUUID4 + case strfmt.UUID5, *strfmt.UUID5: + return stringType, stringFormatUUID5 + // TODO: missing binary (io.ReadCloser) + // TODO: missing json.Number + default: + val := reflect.ValueOf(data) + tpe := val.Type() + switch tpe.Kind() { //nolint:exhaustive + case reflect.Bool: + return booleanType, "" + case reflect.String: + return stringType, "" + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32: + // NOTE: that is the spec. With go-openapi, is that not uint32 for unsigned integers? + return integerType, integerFormatInt32 + case reflect.Int, reflect.Int64, reflect.Uint, reflect.Uint64: + return integerType, integerFormatInt64 + case reflect.Float32: + // NOTE: is that not numberFormatFloat? + return numberType, numberFormatFloat32 + case reflect.Float64: + // NOTE: is that not "double"? + return numberType, numberFormatFloat64 + // NOTE: go arrays (reflect.Array) are not supported (fixed length) + case reflect.Slice: + return arrayType, "" + case reflect.Map, reflect.Struct: + return objectType, "" + case reflect.Interface: + // What to do here? 
+ panic("dunno what to do here") + case reflect.Ptr: + return t.schemaInfoForType(reflect.Indirect(val).Interface()) + } + } + return "", "" +} + +func (t *typeValidator) redeem() { + pools.poolOfTypeValidators.RedeemValidator(t) +} diff --git a/vendor/github.com/go-openapi/validate/update-fixtures.sh b/vendor/github.com/go-openapi/validate/update-fixtures.sh new file mode 100644 index 00000000000..21b06e2b09a --- /dev/null +++ b/vendor/github.com/go-openapi/validate/update-fixtures.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +set -eu -o pipefail +dir=$(git rev-parse --show-toplevel) +scratch=$(mktemp -d -t tmp.XXXXXXXXXX) + +function finish { + rm -rf "$scratch" +} +trap finish EXIT SIGHUP SIGINT SIGTERM + +cd "$scratch" +git clone https://github.com/json-schema-org/JSON-Schema-Test-Suite Suite +cp -r Suite/tests/draft4/* "$dir/fixtures/jsonschema_suite" +cp -a Suite/remotes "$dir/fixtures/jsonschema_suite" diff --git a/vendor/github.com/go-openapi/validate/validator.go b/vendor/github.com/go-openapi/validate/validator.go new file mode 100644 index 00000000000..289a847fc7b --- /dev/null +++ b/vendor/github.com/go-openapi/validate/validator.go @@ -0,0 +1,1040 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +// An EntityValidator is an interface for things that can validate entities +type EntityValidator interface { + Validate(any) *Result +} + +type valueValidator interface { + SetPath(path string) + Applies(any, reflect.Kind) bool + Validate(any) *Result +} + +type itemsValidator struct { + items *spec.Items + root any + path string + in string + validators [6]valueValidator + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func newItemsValidator(path, in string, items *spec.Items, root any, formats strfmt.Registry, opts *SchemaValidatorOptions) *itemsValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var iv *itemsValidator + if opts.recycleValidators { + iv = pools.poolOfItemsValidators.BorrowValidator() + } else { + iv = new(itemsValidator) + } + + iv.path = path + iv.in = in + iv.items = items + iv.root = root + iv.KnownFormats = formats + iv.Options = opts + iv.validators = [6]valueValidator{ + iv.typeValidator(), + iv.stringValidator(), + iv.formatValidator(), + iv.numberValidator(), + iv.sliceValidator(), + iv.commonValidator(), + } + return iv +} + +func (i *itemsValidator) Validate(index int, data any) *Result { + if i.Options.recycleValidators { + defer func() { + i.redeemChildren() + i.redeem() + }() + } + + tpe := reflect.TypeOf(data) + kind := tpe.Kind() + var result *Result + if i.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + + path := fmt.Sprintf("%s.%d", i.path, index) + + for idx, validator := range i.validators { + if !validator.Applies(i.root, kind) { + if i.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + i.validators[idx] = nil // prevents further (unsafe) usage + } + + continue + } + + validator.SetPath(path) + err := validator.Validate(data) + if i.Options.recycleValidators { + i.validators[idx] = nil // 
prevents further (unsafe) usage + } + if err != nil { + result.Inc() + if err.HasErrors() { + result.Merge(err) + + break + } + + result.Merge(err) + } + } + + return result +} + +func (i *itemsValidator) typeValidator() valueValidator { + return newTypeValidator( + i.path, + i.in, + spec.StringOrArray([]string{i.items.Type}), + i.items.Nullable, + i.items.Format, + i.Options, + ) +} + +func (i *itemsValidator) commonValidator() valueValidator { + return newBasicCommonValidator( + "", + i.in, + i.items.Default, + i.items.Enum, + i.Options, + ) +} + +func (i *itemsValidator) sliceValidator() valueValidator { + return newBasicSliceValidator( + "", + i.in, + i.items.Default, + i.items.MaxItems, + i.items.MinItems, + i.items.UniqueItems, + i.items.Items, + i.root, + i.KnownFormats, + i.Options, + ) +} + +func (i *itemsValidator) numberValidator() valueValidator { + return newNumberValidator( + "", + i.in, + i.items.Default, + i.items.MultipleOf, + i.items.Maximum, + i.items.ExclusiveMaximum, + i.items.Minimum, + i.items.ExclusiveMinimum, + i.items.Type, + i.items.Format, + i.Options, + ) +} + +func (i *itemsValidator) stringValidator() valueValidator { + return newStringValidator( + "", + i.in, + i.items.Default, + false, // Required + false, // AllowEmpty + i.items.MaxLength, + i.items.MinLength, + i.items.Pattern, + i.Options, + ) +} + +func (i *itemsValidator) formatValidator() valueValidator { + return newFormatValidator( + "", + i.in, + i.items.Format, + i.KnownFormats, + i.Options, + ) +} + +func (i *itemsValidator) redeem() { + pools.poolOfItemsValidators.RedeemValidator(i) +} + +func (i *itemsValidator) redeemChildren() { + for idx, validator := range i.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + i.validators[idx] = nil // free up allocated children if not in pool + } +} + +type basicCommonValidator struct { + Path string + In string + Default any + Enum []any + Options *SchemaValidatorOptions +} + +func newBasicCommonValidator(path, in string, def any, enum []any, opts *SchemaValidatorOptions) *basicCommonValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var b *basicCommonValidator + if opts.recycleValidators { + b = pools.poolOfBasicCommonValidators.BorrowValidator() + } else { + b = new(basicCommonValidator) + } + + b.Path = path + b.In = in + b.Default = def + b.Enum = enum + b.Options = opts + + return b +} + +func (b *basicCommonValidator) SetPath(path string) { + b.Path = path +} + +func (b *basicCommonValidator) Applies(source any, _ reflect.Kind) bool { + switch source.(type) { + case *spec.Parameter, *spec.Schema, *spec.Header: + return true + default: + return false + } +} + +func (b *basicCommonValidator) Validate(data any) (res *Result) { + if b.Options.recycleValidators { + defer func() { + b.redeem() + }() + } + + if len(b.Enum) == 0 { + return nil + } + + for _, enumValue := range b.Enum { + actualType := reflect.TypeOf(enumValue) + if actualType == nil { // Safeguard + continue + } + + expectedValue := reflect.ValueOf(data) + if expectedValue.IsValid() && + expectedValue.Type().ConvertibleTo(actualType) && + reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) { + return nil + } + } + + return errorHelp.sErr(errors.EnumFail(b.Path, b.In, data, b.Enum), b.Options.recycleResult) +} + +func (b 
*basicCommonValidator) redeem() { + pools.poolOfBasicCommonValidators.RedeemValidator(b) +} + +// A HeaderValidator has very limited subset of validations to apply +type HeaderValidator struct { + name string + header *spec.Header + validators [6]valueValidator + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +// NewHeaderValidator creates a new header validator object +func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, options ...Option) *HeaderValidator { + opts := new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newHeaderValidator(name, header, formats, opts) +} + +func newHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, opts *SchemaValidatorOptions) *HeaderValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var p *HeaderValidator + if opts.recycleValidators { + p = pools.poolOfHeaderValidators.BorrowValidator() + } else { + p = new(HeaderValidator) + } + + p.name = name + p.header = header + p.KnownFormats = formats + p.Options = opts + p.validators = [6]valueValidator{ + newTypeValidator( + name, + "header", + spec.StringOrArray([]string{header.Type}), + header.Nullable, + header.Format, + p.Options, + ), + p.stringValidator(), + p.formatValidator(), + p.numberValidator(), + p.sliceValidator(), + p.commonValidator(), + } + + return p +} + +// Validate the value of the header against its schema +func (p *HeaderValidator) Validate(data any) *Result { + if p.Options.recycleValidators { + defer func() { + p.redeemChildren() + p.redeem() + }() + } + + if data == nil { + return nil + } + + var result *Result + if p.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + + tpe := reflect.TypeOf(data) + kind := tpe.Kind() + + for idx, validator := range p.validators { + if !validator.Applies(p.header, kind) { + if p.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // prevents further (unsafe) usage + } + + continue + } + + err := validator.Validate(data) + if p.Options.recycleValidators { + p.validators[idx] = nil // prevents further (unsafe) usage + } + if err != nil { + if err.HasErrors() { + result.Merge(err) + break + } + result.Merge(err) + } + } + + return result +} + +func (p *HeaderValidator) commonValidator() valueValidator { + return newBasicCommonValidator( + p.name, + "response", + p.header.Default, + p.header.Enum, + p.Options, + ) +} + +func (p *HeaderValidator) sliceValidator() valueValidator { + return newBasicSliceValidator( + p.name, + "response", + p.header.Default, + p.header.MaxItems, + p.header.MinItems, + p.header.UniqueItems, + p.header.Items, + p.header, + p.KnownFormats, + p.Options, + ) +} + +func (p *HeaderValidator) numberValidator() valueValidator { + return newNumberValidator( + p.name, + "response", + p.header.Default, + p.header.MultipleOf, + p.header.Maximum, + p.header.ExclusiveMaximum, + p.header.Minimum, + p.header.ExclusiveMinimum, + p.header.Type, + p.header.Format, + p.Options, + ) +} + +func (p *HeaderValidator) stringValidator() valueValidator { + return newStringValidator( + p.name, + "response", + p.header.Default, + true, + false, + p.header.MaxLength, + p.header.MinLength, + 
p.header.Pattern, + p.Options, + ) +} + +func (p *HeaderValidator) formatValidator() valueValidator { + return newFormatValidator( + p.name, + "response", + p.header.Format, + p.KnownFormats, + p.Options, + ) +} + +func (p *HeaderValidator) redeem() { + pools.poolOfHeaderValidators.RedeemValidator(p) +} + +func (p *HeaderValidator) redeemChildren() { + for idx, validator := range p.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // free up allocated children if not in pool + } +} + +// A ParamValidator has very limited subset of validations to apply +type ParamValidator struct { + param *spec.Parameter + validators [6]valueValidator + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +// NewParamValidator creates a new param validator object +func NewParamValidator(param *spec.Parameter, formats strfmt.Registry, options ...Option) *ParamValidator { + opts := new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newParamValidator(param, formats, opts) +} + +func newParamValidator(param *spec.Parameter, formats strfmt.Registry, opts *SchemaValidatorOptions) *ParamValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var p *ParamValidator + if opts.recycleValidators { + p = pools.poolOfParamValidators.BorrowValidator() + } else { + p = new(ParamValidator) + } + + p.param = param + p.KnownFormats = formats + p.Options = opts + p.validators = [6]valueValidator{ + newTypeValidator( + param.Name, + param.In, + spec.StringOrArray([]string{param.Type}), + param.Nullable, + param.Format, + p.Options, + ), + p.stringValidator(), + p.formatValidator(), + p.numberValidator(), + p.sliceValidator(), + p.commonValidator(), + } + + return p +} + +// Validate the data against the description of the parameter +func (p *ParamValidator) Validate(data any) *Result { + if data == nil { + return nil + } + + var result *Result + if p.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + + tpe := reflect.TypeOf(data) + kind := tpe.Kind() + + if p.Options.recycleValidators { + defer func() { + p.redeemChildren() + p.redeem() + }() + } + + // TODO: validate type + for idx, validator := range p.validators { + if !validator.Applies(p.param, kind) { + if p.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // prevents further (unsafe) usage + } + + continue + } + + err := validator.Validate(data) + if p.Options.recycleValidators { + p.validators[idx] = nil // prevents further (unsafe) usage + } + if err != nil { + if err.HasErrors() { + result.Merge(err) + break + } + result.Merge(err) + } + } + + return result +} + +func (p *ParamValidator) commonValidator() valueValidator { + return newBasicCommonValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.Enum, + p.Options, + ) +} + +func (p *ParamValidator) sliceValidator() valueValidator { + return newBasicSliceValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.MaxItems, + p.param.MinItems, + 
p.param.UniqueItems, + p.param.Items, + p.param, + p.KnownFormats, + p.Options, + ) +} + +func (p *ParamValidator) numberValidator() valueValidator { + return newNumberValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.MultipleOf, + p.param.Maximum, + p.param.ExclusiveMaximum, + p.param.Minimum, + p.param.ExclusiveMinimum, + p.param.Type, + p.param.Format, + p.Options, + ) +} + +func (p *ParamValidator) stringValidator() valueValidator { + return newStringValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.Required, + p.param.AllowEmptyValue, + p.param.MaxLength, + p.param.MinLength, + p.param.Pattern, + p.Options, + ) +} + +func (p *ParamValidator) formatValidator() valueValidator { + return newFormatValidator( + p.param.Name, + p.param.In, + p.param.Format, + p.KnownFormats, + p.Options, + ) +} + +func (p *ParamValidator) redeem() { + pools.poolOfParamValidators.RedeemValidator(p) +} + +func (p *ParamValidator) redeemChildren() { + for idx, validator := range p.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // free up allocated children if not in pool + } +} + +type basicSliceValidator struct { + Path string + In string + Default any + MaxItems *int64 + MinItems *int64 + UniqueItems bool + Items *spec.Items + Source any + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func newBasicSliceValidator( + path, in string, + def any, maxItems, minItems *int64, uniqueItems bool, items *spec.Items, + source any, formats strfmt.Registry, + opts *SchemaValidatorOptions) *basicSliceValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var s *basicSliceValidator + if opts.recycleValidators { + s = pools.poolOfBasicSliceValidators.BorrowValidator() + } else { + s = new(basicSliceValidator) + } + + s.Path = path + s.In = in + s.Default = def + s.MaxItems = maxItems + s.MinItems = minItems + s.UniqueItems = uniqueItems + s.Items = items + s.Source = source + s.KnownFormats = formats + s.Options = opts + + return s +} + +func (s *basicSliceValidator) SetPath(path string) { + s.Path = path +} + +func (s *basicSliceValidator) Applies(source any, kind reflect.Kind) bool { + switch source.(type) { + case *spec.Parameter, *spec.Items, *spec.Header: + return kind == reflect.Slice + default: + return false + } +} + +func (s *basicSliceValidator) Validate(data any) *Result { + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } + val := reflect.ValueOf(data) + + size := int64(val.Len()) + if s.MinItems != nil { + if err := MinItems(s.Path, s.In, size, *s.MinItems); err != nil { + return errorHelp.sErr(err, s.Options.recycleResult) + } + } + + if s.MaxItems != nil { + if err := MaxItems(s.Path, s.In, size, *s.MaxItems); err != nil { + return errorHelp.sErr(err, s.Options.recycleResult) + } + } + + if s.UniqueItems { + if err := UniqueItems(s.Path, s.In, data); err != nil { + return errorHelp.sErr(err, s.Options.recycleResult) + } + } + + if s.Items == nil { + return nil + } + + for i := range int(size) { + itemsValidator := newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats, s.Options) + ele := val.Index(i) + if err := itemsValidator.Validate(i, ele.Interface()); err != nil { + if err.HasErrors() { + return err + } + if err.wantsRedeemOnMerge { + 
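+				// err carries no errors at this point: hand the pooled Result
+				// back to the pool instead of merging it into a parent result.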
pools.poolOfResults.RedeemResult(err) + } + } + } + + return nil +} + +func (s *basicSliceValidator) redeem() { + pools.poolOfBasicSliceValidators.RedeemValidator(s) +} + +type numberValidator struct { + Path string + In string + Default any + MultipleOf *float64 + Maximum *float64 + ExclusiveMaximum bool + Minimum *float64 + ExclusiveMinimum bool + // Allows for more accurate behavior regarding integers + Type string + Format string + Options *SchemaValidatorOptions +} + +func newNumberValidator( + path, in string, def any, + multipleOf, maximum *float64, exclusiveMaximum bool, minimum *float64, exclusiveMinimum bool, + typ, format string, + opts *SchemaValidatorOptions) *numberValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var n *numberValidator + if opts.recycleValidators { + n = pools.poolOfNumberValidators.BorrowValidator() + } else { + n = new(numberValidator) + } + + n.Path = path + n.In = in + n.Default = def + n.MultipleOf = multipleOf + n.Maximum = maximum + n.ExclusiveMaximum = exclusiveMaximum + n.Minimum = minimum + n.ExclusiveMinimum = exclusiveMinimum + n.Type = typ + n.Format = format + n.Options = opts + + return n +} + +func (n *numberValidator) SetPath(path string) { + n.Path = path +} + +func (n *numberValidator) Applies(source any, kind reflect.Kind) bool { + switch source.(type) { + case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header: + isInt := kind >= reflect.Int && kind <= reflect.Uint64 + isFloat := kind == reflect.Float32 || kind == reflect.Float64 + return isInt || isFloat + default: + return false + } +} + +// Validate provides a validator for generic JSON numbers, +// +// By default, numbers are internally represented as float64. +// Formats float, or float32 may alter this behavior by mapping to float32. +// A special validation process is followed for integers, with optional "format": +// this is an attempt to provide a validation with native types. +// +// NOTE: since the constraint specified (boundary, multipleOf) is unmarshalled +// as float64, loss of information remains possible (e.g. on very large integers). +// +// Since this value directly comes from the unmarshalling, it is not possible +// at this stage of processing to check further and guarantee the correctness of such values. +// +// Normally, the JSON Number.MAX_SAFE_INTEGER (resp. Number.MIN_SAFE_INTEGER) +// would check we do not get such a loss. +// +// If this is the case, replace AddErrors() by AddWarnings() and IsValid() by !HasWarnings(). +// +// TODO: consider replacing boundary check errors by simple warnings. +// +// TODO: default boundaries with MAX_SAFE_INTEGER are not checked (specific to json.Number?) +func (n *numberValidator) Validate(val any) *Result { + if n.Options.recycleValidators { + defer func() { + n.redeem() + }() + } + + var res, resMultiple, resMinimum, resMaximum *Result + if n.Options.recycleResult { + res = pools.poolOfResults.BorrowResult() + } else { + res = new(Result) + } + + // Used only to attempt to validate constraint on value, + // even though value or constraint specified do not match type and format + data := valueHelp.asFloat64(val) + + // Is the provided value within the range of the specified numeric type and format? + res.AddErrors(IsValueValidAgainstRange(val, n.Type, n.Format, "Checked", n.Path)) + + if n.MultipleOf != nil { + resMultiple = pools.poolOfResults.BorrowResult() + + // Is the constraint specifier within the range of the specific numeric type and format? 
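+		// For example, multipleOf: 3000000000 with type "integer" and format "int32"
+		// fails this range check, so the generic float64 fallback below is used.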
+ resMultiple.AddErrors(IsValueValidAgainstRange(*n.MultipleOf, n.Type, n.Format, "MultipleOf", n.Path)) + if resMultiple.IsValid() { + // Constraint validated with compatible types + if err := MultipleOfNativeType(n.Path, n.In, val, *n.MultipleOf); err != nil { + resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult)) + } + } else { + // Constraint nevertheless validated, converted as general number + if err := MultipleOf(n.Path, n.In, data, *n.MultipleOf); err != nil { + resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult)) + } + } + } + + if n.Maximum != nil { + resMaximum = pools.poolOfResults.BorrowResult() + + // Is the constraint specifier within the range of the specific numeric type and format? + resMaximum.AddErrors(IsValueValidAgainstRange(*n.Maximum, n.Type, n.Format, "Maximum boundary", n.Path)) + if resMaximum.IsValid() { + // Constraint validated with compatible types + if err := MaximumNativeType(n.Path, n.In, val, *n.Maximum, n.ExclusiveMaximum); err != nil { + resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) + } + } else { + // Constraint nevertheless validated, converted as general number + if err := Maximum(n.Path, n.In, data, *n.Maximum, n.ExclusiveMaximum); err != nil { + resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) + } + } + } + + if n.Minimum != nil { + resMinimum = pools.poolOfResults.BorrowResult() + + // Is the constraint specifier within the range of the specific numeric type and format? + resMinimum.AddErrors(IsValueValidAgainstRange(*n.Minimum, n.Type, n.Format, "Minimum boundary", n.Path)) + if resMinimum.IsValid() { + // Constraint validated with compatible types + if err := MinimumNativeType(n.Path, n.In, val, *n.Minimum, n.ExclusiveMinimum); err != nil { + resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) + } + } else { + // Constraint nevertheless validated, converted as general number + if err := Minimum(n.Path, n.In, data, *n.Minimum, n.ExclusiveMinimum); err != nil { + resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) + } + } + } + res.Merge(resMultiple, resMinimum, resMaximum) + res.Inc() + + return res +} + +func (n *numberValidator) redeem() { + pools.poolOfNumberValidators.RedeemValidator(n) +} + +type stringValidator struct { + Path string + In string + Default any + Required bool + AllowEmptyValue bool + MaxLength *int64 + MinLength *int64 + Pattern string + Options *SchemaValidatorOptions +} + +func newStringValidator( + path, in string, + def any, required, allowEmpty bool, maxLength, minLength *int64, pattern string, + opts *SchemaValidatorOptions) *stringValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var s *stringValidator + if opts.recycleValidators { + s = pools.poolOfStringValidators.BorrowValidator() + } else { + s = new(stringValidator) + } + + s.Path = path + s.In = in + s.Default = def + s.Required = required + s.AllowEmptyValue = allowEmpty + s.MaxLength = maxLength + s.MinLength = minLength + s.Pattern = pattern + s.Options = opts + + return s +} + +func (s *stringValidator) SetPath(path string) { + s.Path = path +} + +func (s *stringValidator) Applies(source any, kind reflect.Kind) bool { + switch source.(type) { + case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header: + return kind == reflect.String + default: + return false + } +} + +func (s *stringValidator) Validate(val any) *Result { + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } + + data, ok := val.(string) + if !ok { + return 
errorHelp.sErr(errors.InvalidType(s.Path, s.In, stringType, val), s.Options.recycleResult) + } + + if s.Required && !s.AllowEmptyValue && (s.Default == nil || s.Default == "") { + if err := RequiredString(s.Path, s.In, data); err != nil { + return errorHelp.sErr(err, s.Options.recycleResult) + } + } + + if s.MaxLength != nil { + if err := MaxLength(s.Path, s.In, data, *s.MaxLength); err != nil { + return errorHelp.sErr(err, s.Options.recycleResult) + } + } + + if s.MinLength != nil { + if err := MinLength(s.Path, s.In, data, *s.MinLength); err != nil { + return errorHelp.sErr(err, s.Options.recycleResult) + } + } + + if s.Pattern != "" { + if err := Pattern(s.Path, s.In, data, s.Pattern); err != nil { + return errorHelp.sErr(err, s.Options.recycleResult) + } + } + return nil +} + +func (s *stringValidator) redeem() { + pools.poolOfStringValidators.RedeemValidator(s) +} diff --git a/vendor/github.com/go-openapi/validate/values.go b/vendor/github.com/go-openapi/validate/values.go new file mode 100644 index 00000000000..e7dd5c8d3ab --- /dev/null +++ b/vendor/github.com/go-openapi/validate/values.go @@ -0,0 +1,448 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package validate + +import ( + "context" + "fmt" + "reflect" + "strings" + "unicode/utf8" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/conv" +) + +type valueError string + +func (e valueError) Error() string { + return string(e) +} + +// ErrValue indicates that a value validation occurred +const ErrValue valueError = "value validation error" + +// Enum validates if the data is a member of the enum +func Enum(path, in string, data any, enum any) *errors.Validation { + return EnumCase(path, in, data, enum, true) +} + +// EnumCase validates if the data is a member of the enum and may respect case-sensitivity for strings +func EnumCase(path, in string, data any, enum any, caseSensitive bool) *errors.Validation { + val := reflect.ValueOf(enum) + if val.Kind() != reflect.Slice { + return nil + } + + dataString := convertEnumCaseStringKind(data, caseSensitive) + values := make([]any, 0, val.Len()) + for i := range val.Len() { + ele := val.Index(i) + enumValue := ele.Interface() + if data != nil { + if reflect.DeepEqual(data, enumValue) { + return nil + } + enumString := convertEnumCaseStringKind(enumValue, caseSensitive) + if dataString != nil && enumString != nil && strings.EqualFold(*dataString, *enumString) { + return nil + } + actualType := reflect.TypeOf(enumValue) + if actualType == nil { // Safeguard. 
Frankly, I don't know how we may get a nil + continue + } + expectedValue := reflect.ValueOf(data) + if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + // Attempt comparison after type conversion + if reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) { + return nil + } + } + } + values = append(values, enumValue) + } + return errors.EnumFail(path, in, data, values) +} + +// convertEnumCaseStringKind converts interface if it is kind of string and case insensitivity is set +func convertEnumCaseStringKind(value any, caseSensitive bool) *string { + if caseSensitive { + return nil + } + + val := reflect.ValueOf(value) + if val.Kind() != reflect.String { + return nil + } + + str := fmt.Sprintf("%v", value) + return &str +} + +// MinItems validates that there are at least n items in a slice +func MinItems(path, in string, size, minimum int64) *errors.Validation { + if size < minimum { + return errors.TooFewItems(path, in, minimum, size) + } + return nil +} + +// MaxItems validates that there are at most n items in a slice +func MaxItems(path, in string, size, maximum int64) *errors.Validation { + if size > maximum { + return errors.TooManyItems(path, in, maximum, size) + } + return nil +} + +// UniqueItems validates that the provided slice has unique elements +func UniqueItems(path, in string, data any) *errors.Validation { + val := reflect.ValueOf(data) + if val.Kind() != reflect.Slice { + return nil + } + unique := make([]any, 0, val.Len()) + for i := range val.Len() { + v := val.Index(i).Interface() + for _, u := range unique { + if reflect.DeepEqual(v, u) { + return errors.DuplicateItems(path, in) + } + } + unique = append(unique, v) + } + return nil +} + +// MinLength validates a string for minimum length +func MinLength(path, in, data string, minLength int64) *errors.Validation { + strLen := int64(utf8.RuneCountInString(data)) + if strLen < minLength { + return errors.TooShort(path, in, minLength, data) + } + return nil +} + +// MaxLength validates a string for maximum length +func MaxLength(path, in, data string, maxLength int64) *errors.Validation { + strLen := int64(utf8.RuneCountInString(data)) + if strLen > maxLength { + return errors.TooLong(path, in, maxLength, data) + } + return nil +} + +// ReadOnly validates an interface for readonly +func ReadOnly(ctx context.Context, path, in string, data any) *errors.Validation { + + // read only is only validated when operationType is request + if op := extractOperationType(ctx); op != request { + return nil + } + + // data must be of zero value of its type + val := reflect.ValueOf(data) + if val.IsValid() { + if reflect.DeepEqual(reflect.Zero(val.Type()).Interface(), val.Interface()) { + return nil + } + } else { + return nil + } + + return errors.ReadOnly(path, in, data) +} + +// Required validates an interface for requiredness +func Required(path, in string, data any) *errors.Validation { + val := reflect.ValueOf(data) + if val.IsValid() { + if reflect.DeepEqual(reflect.Zero(val.Type()).Interface(), val.Interface()) { + return errors.Required(path, in, data) + } + return nil + } + return errors.Required(path, in, data) +} + +// RequiredString validates a string for requiredness +func RequiredString(path, in, data string) *errors.Validation { + if data == "" { + return errors.Required(path, in, data) + } + return nil +} + +// RequiredNumber validates a number for requiredness +func RequiredNumber(path, in string, data float64) *errors.Validation { + if data == 0 { + return 
errors.Required(path, in, data) + } + return nil +} + +// Pattern validates a string against a regular expression +func Pattern(path, in, data, pattern string) *errors.Validation { + re, err := compileRegexp(pattern) + if err != nil { + return errors.FailedPattern(path, in, fmt.Sprintf("%s, but pattern is invalid: %s", pattern, err.Error()), data) + } + if !re.MatchString(data) { + return errors.FailedPattern(path, in, pattern, data) + } + return nil +} + +// MaximumInt validates if a number is smaller than a given maximum +func MaximumInt(path, in string, data, maximum int64, exclusive bool) *errors.Validation { + if (!exclusive && data > maximum) || (exclusive && data >= maximum) { + return errors.ExceedsMaximumInt(path, in, maximum, exclusive, data) + } + return nil +} + +// MaximumUint validates if a number is smaller than a given maximum +func MaximumUint(path, in string, data, maximum uint64, exclusive bool) *errors.Validation { + if (!exclusive && data > maximum) || (exclusive && data >= maximum) { + return errors.ExceedsMaximumUint(path, in, maximum, exclusive, data) + } + return nil +} + +// Maximum validates if a number is smaller than a given maximum +func Maximum(path, in string, data, maximum float64, exclusive bool) *errors.Validation { + if (!exclusive && data > maximum) || (exclusive && data >= maximum) { + return errors.ExceedsMaximum(path, in, maximum, exclusive, data) + } + return nil +} + +// Minimum validates if a number is smaller than a given minimum +func Minimum(path, in string, data, minimum float64, exclusive bool) *errors.Validation { + if (!exclusive && data < minimum) || (exclusive && data <= minimum) { + return errors.ExceedsMinimum(path, in, minimum, exclusive, data) + } + return nil +} + +// MinimumInt validates if a number is smaller than a given minimum +func MinimumInt(path, in string, data, minimum int64, exclusive bool) *errors.Validation { + if (!exclusive && data < minimum) || (exclusive && data <= minimum) { + return errors.ExceedsMinimumInt(path, in, minimum, exclusive, data) + } + return nil +} + +// MinimumUint validates if a number is smaller than a given minimum +func MinimumUint(path, in string, data, minimum uint64, exclusive bool) *errors.Validation { + if (!exclusive && data < minimum) || (exclusive && data <= minimum) { + return errors.ExceedsMinimumUint(path, in, minimum, exclusive, data) + } + return nil +} + +// MultipleOf validates if the provided number is a multiple of the factor +func MultipleOf(path, in string, data, factor float64) *errors.Validation { + // multipleOf factor must be positive + if factor <= 0 { + return errors.MultipleOfMustBePositive(path, in, factor) + } + var mult float64 + if factor < 1 { + mult = 1 / factor * data + } else { + mult = data / factor + } + if !conv.IsFloat64AJSONInteger(mult) { + return errors.NotMultipleOf(path, in, factor, data) + } + return nil +} + +// MultipleOfInt validates if the provided integer is a multiple of the factor +func MultipleOfInt(path, in string, data int64, factor int64) *errors.Validation { + // multipleOf factor must be positive + if factor <= 0 { + return errors.MultipleOfMustBePositive(path, in, factor) + } + mult := data / factor + if mult*factor != data { + return errors.NotMultipleOf(path, in, factor, data) + } + return nil +} + +// MultipleOfUint validates if the provided unsigned integer is a multiple of the factor +func MultipleOfUint(path, in string, data, factor uint64) *errors.Validation { + // multipleOf factor must be positive + if factor == 0 { + return 
errors.MultipleOfMustBePositive(path, in, factor) + } + mult := data / factor + if mult*factor != data { + return errors.NotMultipleOf(path, in, factor, data) + } + return nil +} + +// FormatOf validates if a string matches a format in the format registry +func FormatOf(path, in, format, data string, registry strfmt.Registry) *errors.Validation { + if registry == nil { + registry = strfmt.Default + } + if ok := registry.ContainsName(format); !ok { + return errors.InvalidTypeName(format) + } + if ok := registry.Validates(format, data); !ok { + return errors.InvalidType(path, in, format, data) + } + return nil +} + +// MaximumNativeType provides native type constraint validation as a facade +// to various numeric types versions of Maximum constraint check. +// +// Assumes that any possible loss conversion during conversion has been +// checked beforehand. +// +// NOTE: currently, the max value is marshalled as a float64, no matter what, +// which means there may be a loss during conversions (e.g. for very large integers) +// +// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +func MaximumNativeType(path, in string, val any, maximum float64, exclusive bool) *errors.Validation { + kind := reflect.ValueOf(val).Type().Kind() + switch kind { //nolint:exhaustive + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + value := valueHelp.asInt64(val) + return MaximumInt(path, in, value, int64(maximum), exclusive) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + value := valueHelp.asUint64(val) + if maximum < 0 { + return errors.ExceedsMaximum(path, in, maximum, exclusive, val) + } + return MaximumUint(path, in, value, uint64(maximum), exclusive) + case reflect.Float32, reflect.Float64: + fallthrough + default: + value := valueHelp.asFloat64(val) + return Maximum(path, in, value, maximum, exclusive) + } +} + +// MinimumNativeType provides native type constraint validation as a facade +// to various numeric types versions of Minimum constraint check. +// +// Assumes that any possible loss conversion during conversion has been +// checked beforehand. +// +// NOTE: currently, the min value is marshalled as a float64, no matter what, +// which means there may be a loss during conversions (e.g. for very large integers) +// +// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +func MinimumNativeType(path, in string, val any, minimum float64, exclusive bool) *errors.Validation { + kind := reflect.ValueOf(val).Type().Kind() + switch kind { //nolint:exhaustive + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + value := valueHelp.asInt64(val) + return MinimumInt(path, in, value, int64(minimum), exclusive) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + value := valueHelp.asUint64(val) + if minimum < 0 { + return nil + } + return MinimumUint(path, in, value, uint64(minimum), exclusive) + case reflect.Float32, reflect.Float64: + fallthrough + default: + value := valueHelp.asFloat64(val) + return Minimum(path, in, value, minimum, exclusive) + } +} + +// MultipleOfNativeType provides native type constraint validation as a facade +// to various numeric types version of MultipleOf constraint check. +// +// Assumes that any possible loss conversion during conversion has been +// checked beforehand. 
+// +// NOTE: currently, the multipleOf factor is marshalled as a float64, no matter what, +// which means there may be a loss during conversions (e.g. for very large integers) +// +// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +func MultipleOfNativeType(path, in string, val any, multipleOf float64) *errors.Validation { + kind := reflect.ValueOf(val).Type().Kind() + switch kind { //nolint:exhaustive + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + value := valueHelp.asInt64(val) + return MultipleOfInt(path, in, value, int64(multipleOf)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + value := valueHelp.asUint64(val) + return MultipleOfUint(path, in, value, uint64(multipleOf)) + case reflect.Float32, reflect.Float64: + fallthrough + default: + value := valueHelp.asFloat64(val) + return MultipleOf(path, in, value, multipleOf) + } +} + +// IsValueValidAgainstRange checks that a numeric value is compatible with +// the range defined by Type and Format, that is, may be converted without loss. +// +// NOTE: this check is about type capacity and not formal verification such as: 1.0 != 1L +func IsValueValidAgainstRange(val any, typeName, format, prefix, path string) error { + kind := reflect.ValueOf(val).Type().Kind() + + // What is the string representation of val + var stringRep string + switch kind { //nolint:exhaustive + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + stringRep = conv.FormatUinteger(valueHelp.asUint64(val)) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + stringRep = conv.FormatInteger(valueHelp.asInt64(val)) + case reflect.Float32, reflect.Float64: + stringRep = conv.FormatFloat(valueHelp.asFloat64(val)) + default: + return fmt.Errorf("%s value number range checking called with invalid (non numeric) val type in %s: %w", prefix, path, ErrValue) + } + + var errVal error + + switch typeName { + case integerType: + switch format { + case integerFormatInt32: + _, errVal = conv.ConvertInt32(stringRep) + case integerFormatUInt32: + _, errVal = conv.ConvertUint32(stringRep) + case integerFormatUInt64: + _, errVal = conv.ConvertUint64(stringRep) + case integerFormatInt64: + fallthrough + default: + _, errVal = conv.ConvertInt64(stringRep) + } + case numberType: + fallthrough + default: + switch format { + case numberFormatFloat, numberFormatFloat32: + _, errVal = conv.ConvertFloat32(stringRep) + case numberFormatDouble, numberFormatFloat64: + fallthrough + default: + // No check can be performed here since + // no number beyond float64 is supported + } + } + if errVal != nil { // We don't report the actual errVal from strconv + if format != "" { + errVal = fmt.Errorf("%s value must be of type %s with format %s in %s: %w", prefix, typeName, format, path, ErrValue) + } else { + errVal = fmt.Errorf("%s value must be of type %s (default format) in %s: %w", prefix, typeName, path, ErrValue) + } + } + return errVal +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig new file mode 100644 index 00000000000..faef0c91e7e --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig @@ -0,0 +1,21 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab + +[{Makefile,*.mk}] +indent_style = tab 
+ +[*.nix] +indent_size = 2 + +[.golangci.yaml] +indent_size = 2 diff --git a/vendor/github.com/go-viper/mapstructure/v2/.envrc b/vendor/github.com/go-viper/mapstructure/v2/.envrc new file mode 100644 index 00000000000..2e0f9f5f711 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.envrc @@ -0,0 +1,4 @@ +if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" +fi +use flake . --impure diff --git a/vendor/github.com/go-viper/mapstructure/v2/.gitignore b/vendor/github.com/go-viper/mapstructure/v2/.gitignore new file mode 100644 index 00000000000..470e7ca2bd2 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.gitignore @@ -0,0 +1,6 @@ +/.devenv/ +/.direnv/ +/.pre-commit-config.yaml +/bin/ +/build/ +/var/ diff --git a/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml new file mode 100644 index 00000000000..bda96256683 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml @@ -0,0 +1,48 @@ +version: "2" + +run: + timeout: 10m + +linters: + enable: + - govet + - ineffassign + # - misspell + - nolintlint + # - revive + + disable: + - errcheck + - staticcheck + - unused + + settings: + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + +formatters: + enable: + - gci + - gofmt + - gofumpt + - goimports + # - golines + + settings: + gci: + sections: + - standard + - default + - localmodule + gofmt: + simplify: true + rewrite-rules: + - pattern: interface{} + replacement: any + + exclusions: + paths: + - internal/ diff --git a/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md new file mode 100644 index 00000000000..afd44e5f5fc --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md @@ -0,0 +1,104 @@ +> [!WARNING] +> As of v2 of this library, change log can be found in GitHub releases. + +## 1.5.1 + +* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282] +* Fix map of slices not decoding properly in certain cases. [GH-266] + +## 1.5.0 + +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. 
[GH-183]
+* Squash is now supported with embedded fields that are struct pointers [GH-205]
+* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
+
+## 1.3.3
+
+* Decoding maps from maps creates a settable value for decode hooks [GH-203]
+
+## 1.3.2
+
+* Decode into interface type with a struct value is supported [GH-187]
+
+## 1.3.1
+
+* Squash should only squash embedded structs. [GH-194]
+
+## 1.3.0
+
+* Added `",omitempty"` support. This will ignore zero values in the source
+  structure when encoding. [GH-145]
+
+## 1.2.3
+
+* Fix duplicate entries in Keys list with pointer values. [GH-185]
+
+## 1.2.2
+
+* Do not add unsettable (unexported) values to the unused metadata key
+  or "remain" value. [GH-150]
+
+## 1.2.1
+
+* Go modules checksum mismatch fix
+
+## 1.2.0
+
+* Added support to capture unused values in a field using the `",remain"` value
+  in the mapstructure tag. There is an example to showcase usage.
+* Added `DecoderConfig` option to always squash embedded structs
+* `json.Number` can decode into `uint` types
+* Empty slices are preserved and not replaced with nil slices
+* Fix panic that can occur when decoding a map into a nil slice of structs
+* Improved package documentation for godoc
+
+## 1.1.2
+
+* Fix error when decode hook decodes interface implementation into interface
+  type. [GH-140]
+
+## 1.1.1
+
+* Fix panic that can happen in `decodePtr`
+
+## 1.1.0
+
+* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
+* Support struct to struct decoding [GH-137]
+* If source map value is nil, then destination map value is nil (instead of empty)
+* If source slice value is nil, then destination slice value is nil (instead of empty)
+* If source pointer is nil, then destination pointer is set to nil (instead of
+  allocated zero value of type)
+
+## 1.0.0
+
+* Initial tagged stable release.
diff --git a/vendor/github.com/go-viper/mapstructure/v2/LICENSE b/vendor/github.com/go-viper/mapstructure/v2/LICENSE
new file mode 100644
index 00000000000..f9c841a51e0
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/go-viper/mapstructure/v2/README.md b/vendor/github.com/go-viper/mapstructure/v2/README.md
new file mode 100644
index 00000000000..45db719755a
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/README.md
@@ -0,0 +1,81 @@
+# mapstructure
+
+[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/go-viper/mapstructure/ci.yaml?style=flat-square)](https://github.com/go-viper/mapstructure/actions/workflows/ci.yaml)
+[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2)
+![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/go-viper/mapstructure?style=flat-square&color=61CFDD)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-viper/mapstructure/badge?style=flat-square)](https://deps.dev/go/github.com%252Fgo-viper%252Fmapstructure%252Fv2)
+
+mapstructure is a Go library for decoding generic map values to structures
+and vice versa, while providing helpful error handling.
+
+This library is most useful when decoding values from some data stream (JSON,
+Gob, etc.) where you don't _quite_ know the structure of the underlying data
+until you read a part of it. You can therefore read a `map[string]interface{}`
+and use this library to decode it into the proper underlying native Go
+structure.
+
+## Installation
+
+```shell
+go get github.com/go-viper/mapstructure/v2
+```
+
+## Migrating from `github.com/mitchellh/mapstructure`
+
+[@mitchellh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This repository achieved the "blessed fork" status.
+
+You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`.
+The API is the same, so you don't need to change anything else.
+
+Here is a script that can help you with the migration:
+
+```shell
+sed -i 's|github.com/mitchellh/mapstructure|github.com/go-viper/mapstructure/v2|g' $(find . -type f -name '*.go')
+```
+
+If you need more time to migrate your code, that is absolutely fine.
+
+Some of the latest fixes are backported to the v1 release branch of this package, so you can use the Go modules `replace` feature until you are ready to migrate:
+
+```shell
+replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0
+```
+
+## Usage & Example
+
+For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2).
+
+The `Decode` function has examples associated with it there.
+
+## But Why?!
+
+Go offers fantastic standard libraries for decoding formats such as JSON.
+The standard method is to have a struct pre-created, and populate that struct
+from the bytes of the encoded format. This is great, but the problem is if
+you have configuration or an encoding that changes slightly depending on
+specific fields. For example, consider this JSON:
+
+```json
+{
+  "type": "person",
+  "name": "Mitchell"
+}
+```
+
+Perhaps we can't populate a specific structure without first reading
+the "type" field from the JSON. We could always do two passes over the
+decoding of the JSON (reading the "type" first, and the rest later).
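To make the two-pass idea concrete, here is a minimal sketch (the `head` struct and `person` type are illustrative, not part of this library):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type person struct {
	Type string `json:"type"`
	Name string `json:"name"`
}

func main() {
	raw := []byte(`{"type": "person", "name": "Mitchell"}`)

	// Pass 1: peek at the discriminator field only.
	var head struct {
		Type string `json:"type"`
	}
	if err := json.Unmarshal(raw, &head); err != nil {
		panic(err)
	}

	// Pass 2: decode into the concrete type selected by "type".
	switch head.Type {
	case "person":
		var p person
		if err := json.Unmarshal(raw, &p); err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", p) // {Type:person Name:Mitchell}
	}
}
```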
+However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. + +## Credits + +Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh). +This is a maintained fork of the original library. + +Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349). + +## License + +The project is licensed under the [MIT License](LICENSE). diff --git a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go new file mode 100644 index 00000000000..a852a0a04c8 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go @@ -0,0 +1,714 @@ +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "net" + "net/netip" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an any) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []any{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// cachedDecodeHook takes a raw DecodeHookFunc (an any) and turns +// it into a closure to be used directly +// if the type fails to convert we return a closure always erroring to keep the previous behaviour +func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (any, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return func(from reflect.Value, to reflect.Value) (any, error) { + return f(from.Type(), to.Type(), from.Interface()) + } + case DecodeHookFuncKind: + return func(from reflect.Value, to reflect.Value) (any, error) { + return f(from.Kind(), to.Kind(), from.Interface()) + } + case DecodeHookFuncValue: + return func(from reflect.Value, to reflect.Value) (any, error) { + return f(from, to) + } + default: + return func(from reflect.Value, to reflect.Value) (any, error) { + return nil, errors.New("invalid decode hook signature") + } + } +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value, +) (any, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. 
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (any, error), 0, len(fs)) + for _, f := range fs { + cached = append(cached, cachedDecodeHook(f)) + } + return func(f reflect.Value, t reflect.Value) (any, error) { + var err error + data := f.Interface() + + newFrom := f + for _, c := range cached { + data, err = c(newFrom, t) + if err != nil { + return nil, err + } + if v, ok := data.(reflect.Value); ok { + newFrom = v + } else { + newFrom = reflect.ValueOf(data) + } + } + + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (any, error), 0, len(ff)) + for _, f := range ff { + cached = append(cached, cachedDecodeHook(f)) + } + return func(a, b reflect.Value) (any, error) { + var allErrs string + var out any + var err error + + for _, c := range cached { + out, err = c(a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.SliceOf(f) { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToWeakSliceHookFunc brings back the old (pre-v2) behavior of [StringToSliceHookFunc]. +// +// As of mapstructure v2.0.0 [StringToSliceHookFunc] checks if the return type is a string slice. +// This function removes that check. +func StringToWeakSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + d, err := time.ParseDuration(data.(string)) + + return d, wrapTimeParseDurationError(err) + } +} + +// StringToTimeLocationHookFunc returns a DecodeHookFunc that converts +// strings to *time.Location. +func StringToTimeLocationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Local) { + return data, nil + } + d, err := time.LoadLocation(data.(string)) + + return d, wrapTimeParseLocationError(err) + } +} + +// StringToURLHookFunc returns a DecodeHookFunc that converts +// strings to *url.URL. 
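As a usage sketch, the hooks defined so far are typically wired together through `DecoderConfig`; the `Config` struct and input map below are illustrative, not from this repository:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-viper/mapstructure/v2"
)

type Config struct {
	Timeout time.Duration `mapstructure:"timeout"`
	Tags    []string      `mapstructure:"tags"`
}

func main() {
	input := map[string]any{
		"timeout": "1m30s", // parsed by StringToTimeDurationHookFunc
		"tags":    "a,b,c", // split by StringToSliceHookFunc
	}

	var cfg Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &cfg,
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Timeout:1m30s Tags:[a b c]}
}
```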
+func StringToURLHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(&url.URL{}) { + return data, nil + } + + // Convert it by parsing + u, err := url.Parse(data.(string)) + + return u, wrapUrlError(err) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip") + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, wrapNetParseError(err) + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + ti, err := time.Parse(layout, data.(string)) + + return ti, wrapTimeParseError(err) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. 
+func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data any, +) (any, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (any, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i any = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]any) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + str, ok := data.(string) + if !ok { + str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String() + } + if err := unmarshaller.UnmarshalText([]byte(str)); err != nil { + return nil, err + } + return result, nil + } +} + +// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts +// strings to netip.Addr. +func StringToNetIPAddrHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.Addr{}) { + return data, nil + } + + // Convert it by parsing + addr, err := netip.ParseAddr(data.(string)) + + return addr, wrapNetIPParseAddrError(err) + } +} + +// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts +// strings to netip.AddrPort. +func StringToNetIPAddrPortHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.AddrPort{}) { + return data, nil + } + + // Convert it by parsing + addrPort, err := netip.ParseAddrPort(data.(string)) + + return addrPort, wrapNetIPParseAddrPortError(err) + } +} + +// StringToNetIPPrefixHookFunc returns a DecodeHookFunc that converts +// strings to netip.Prefix. +func StringToNetIPPrefixHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data any, + ) (any, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.Prefix{}) { + return data, nil + } + + // Convert it by parsing + prefix, err := netip.ParsePrefix(data.(string)) + + return prefix, wrapNetIPParsePrefixError(err) + } +} + +// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts +// strings to basic types. 
+// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128 +func StringToBasicTypeHookFunc() DecodeHookFunc { + return ComposeDecodeHookFunc( + StringToInt8HookFunc(), + StringToUint8HookFunc(), + StringToInt16HookFunc(), + StringToUint16HookFunc(), + StringToInt32HookFunc(), + StringToUint32HookFunc(), + StringToInt64HookFunc(), + StringToUint64HookFunc(), + StringToIntHookFunc(), + StringToUintHookFunc(), + StringToFloat32HookFunc(), + StringToFloat64HookFunc(), + StringToBoolHookFunc(), + // byte and rune are aliases for uint8 and int32 respectively + // StringToByteHookFunc(), + // StringToRuneHookFunc(), + StringToComplex64HookFunc(), + StringToComplex128HookFunc(), + ) +} + +// StringToInt8HookFunc returns a DecodeHookFunc that converts +// strings to int8. +func StringToInt8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int8 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 8) + return int8(i64), wrapStrconvNumError(err) + } +} + +// StringToUint8HookFunc returns a DecodeHookFunc that converts +// strings to uint8. +func StringToUint8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 8) + return uint8(u64), wrapStrconvNumError(err) + } +} + +// StringToInt16HookFunc returns a DecodeHookFunc that converts +// strings to int16. +func StringToInt16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int16 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 16) + return int16(i64), wrapStrconvNumError(err) + } +} + +// StringToUint16HookFunc returns a DecodeHookFunc that converts +// strings to uint16. +func StringToUint16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 16) + return uint16(u64), wrapStrconvNumError(err) + } +} + +// StringToInt32HookFunc returns a DecodeHookFunc that converts +// strings to int32. +func StringToInt32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int32 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 32) + return int32(i64), wrapStrconvNumError(err) + } +} + +// StringToUint32HookFunc returns a DecodeHookFunc that converts +// strings to uint32. +func StringToUint32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 32) + return uint32(u64), wrapStrconvNumError(err) + } +} + +// StringToInt64HookFunc returns a DecodeHookFunc that converts +// strings to int64. 
+func StringToInt64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int64 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 64) + return int64(i64), wrapStrconvNumError(err) + } +} + +// StringToUint64HookFunc returns a DecodeHookFunc that converts +// strings to uint64. +func StringToUint64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 64) + return uint64(u64), wrapStrconvNumError(err) + } +} + +// StringToIntHookFunc returns a DecodeHookFunc that converts +// strings to int. +func StringToIntHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 0) + return int(i64), wrapStrconvNumError(err) + } +} + +// StringToUintHookFunc returns a DecodeHookFunc that converts +// strings to uint. +func StringToUintHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 0) + return uint(u64), wrapStrconvNumError(err) + } +} + +// StringToFloat32HookFunc returns a DecodeHookFunc that converts +// strings to float32. +func StringToFloat32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float32 { + return data, nil + } + + // Convert it by parsing + f64, err := strconv.ParseFloat(data.(string), 32) + return float32(f64), wrapStrconvNumError(err) + } +} + +// StringToFloat64HookFunc returns a DecodeHookFunc that converts +// strings to float64. +func StringToFloat64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float64 { + return data, nil + } + + // Convert it by parsing + f64, err := strconv.ParseFloat(data.(string), 64) + return f64, wrapStrconvNumError(err) + } +} + +// StringToBoolHookFunc returns a DecodeHookFunc that converts +// strings to bool. +func StringToBoolHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Bool { + return data, nil + } + + // Convert it by parsing + b, err := strconv.ParseBool(data.(string)) + return b, wrapStrconvNumError(err) + } +} + +// StringToByteHookFunc returns a DecodeHookFunc that converts +// strings to byte. +func StringToByteHookFunc() DecodeHookFunc { + return StringToUint8HookFunc() +} + +// StringToRuneHookFunc returns a DecodeHookFunc that converts +// strings to rune. +func StringToRuneHookFunc() DecodeHookFunc { + return StringToInt32HookFunc() +} + +// StringToComplex64HookFunc returns a DecodeHookFunc that converts +// strings to complex64. 
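The per-type hooks above rarely need to be used individually; `StringToBasicTypeHookFunc` composes them all, which is handy when every input value arrives as a string (for example, from environment variables). A small sketch with hypothetical field names:

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

func main() {
	var out struct {
		Port    uint16  `mapstructure:"port"`
		Ratio   float64 `mapstructure:"ratio"`
		Enabled bool    `mapstructure:"enabled"`
	}

	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:     &out,
		DecodeHook: mapstructure.StringToBasicTypeHookFunc(),
	})
	if err != nil {
		panic(err)
	}

	// Every value is a string; the hook converts each to the field's type.
	input := map[string]any{
		"port":    "8080",
		"ratio":   "0.75",
		"enabled": "true",
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Port:8080 Ratio:0.75 Enabled:true}
}
```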
+func StringToComplex64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 { + return data, nil + } + + // Convert it by parsing + c128, err := strconv.ParseComplex(data.(string), 64) + return complex64(c128), wrapStrconvNumError(err) + } +} + +// StringToComplex128HookFunc returns a DecodeHookFunc that converts +// strings to complex128. +func StringToComplex128HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data any) (any, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 { + return data, nil + } + + // Convert it by parsing + c128, err := strconv.ParseComplex(data.(string), 128) + return c128, wrapStrconvNumError(err) + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/errors.go b/vendor/github.com/go-viper/mapstructure/v2/errors.go new file mode 100644 index 00000000000..07d31c22aad --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/errors.go @@ -0,0 +1,244 @@ +package mapstructure + +import ( + "errors" + "fmt" + "net" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +// Error interface is implemented by all errors emitted by mapstructure. +// +// Use [errors.As] to check if an error implements this interface. +type Error interface { + error + + mapstructure() +} + +// DecodeError is a generic error type that holds information about +// a decoding error together with the name of the field that caused the error. +type DecodeError struct { + name string + err error +} + +func newDecodeError(name string, err error) *DecodeError { + return &DecodeError{ + name: name, + err: err, + } +} + +func (e *DecodeError) Name() string { + return e.name +} + +func (e *DecodeError) Unwrap() error { + return e.err +} + +func (e *DecodeError) Error() string { + return fmt.Sprintf("'%s' %s", e.name, e.err) +} + +func (*DecodeError) mapstructure() {} + +// ParseError is an error type that indicates a value could not be parsed +// into the expected type. +type ParseError struct { + Expected reflect.Value + Value any + Err error +} + +func (e *ParseError) Error() string { + return fmt.Sprintf("cannot parse value as '%s': %s", e.Expected.Type(), e.Err) +} + +func (*ParseError) mapstructure() {} + +// UnconvertibleTypeError is an error type that indicates a value could not be +// converted to the expected type. +type UnconvertibleTypeError struct { + Expected reflect.Value + Value any +} + +func (e *UnconvertibleTypeError) Error() string { + return fmt.Sprintf( + "expected type '%s', got unconvertible type '%s'", + e.Expected.Type(), + reflect.TypeOf(e.Value), + ) +} + +func (*UnconvertibleTypeError) mapstructure() {} + +func wrapStrconvNumError(err error) error { + if err == nil { + return nil + } + + if err, ok := err.(*strconv.NumError); ok { + return &strconvNumError{Err: err} + } + + return err +} + +type strconvNumError struct { + Err *strconv.NumError +} + +func (e *strconvNumError) Error() string { + return "strconv." 
+ e.Err.Func + ": " + e.Err.Err.Error() +} + +func (e *strconvNumError) Unwrap() error { return e.Err } + +func wrapUrlError(err error) error { + if err == nil { + return nil + } + + if err, ok := err.(*url.Error); ok { + return &urlError{Err: err} + } + + return err +} + +type urlError struct { + Err *url.Error +} + +func (e *urlError) Error() string { + return fmt.Sprintf("%s", e.Err.Err) +} + +func (e *urlError) Unwrap() error { return e.Err } + +func wrapNetParseError(err error) error { + if err == nil { + return nil + } + + if err, ok := err.(*net.ParseError); ok { + return &netParseError{Err: err} + } + + return err +} + +type netParseError struct { + Err *net.ParseError +} + +func (e *netParseError) Error() string { + return "invalid " + e.Err.Type +} + +func (e *netParseError) Unwrap() error { return e.Err } + +func wrapTimeParseError(err error) error { + if err == nil { + return nil + } + + if err, ok := err.(*time.ParseError); ok { + return &timeParseError{Err: err} + } + + return err +} + +type timeParseError struct { + Err *time.ParseError +} + +func (e *timeParseError) Error() string { + if e.Err.Message == "" { + return fmt.Sprintf("parsing time as %q: cannot parse as %q", e.Err.Layout, e.Err.LayoutElem) + } + + return "parsing time " + e.Err.Message +} + +func (e *timeParseError) Unwrap() error { return e.Err } + +func wrapNetIPParseAddrError(err error) error { + if err == nil { + return nil + } + + if errMsg := err.Error(); strings.HasPrefix(errMsg, "ParseAddr") { + errPieces := strings.Split(errMsg, ": ") + + return fmt.Errorf("ParseAddr: %s", errPieces[len(errPieces)-1]) + } + + return err +} + +func wrapNetIPParseAddrPortError(err error) error { + if err == nil { + return nil + } + + errMsg := err.Error() + if strings.HasPrefix(errMsg, "invalid port ") { + return errors.New("invalid port") + } else if strings.HasPrefix(errMsg, "invalid ip:port ") { + return errors.New("invalid ip:port") + } + + return err +} + +func wrapNetIPParsePrefixError(err error) error { + if err == nil { + return nil + } + + if errMsg := err.Error(); strings.HasPrefix(errMsg, "netip.ParsePrefix") { + errPieces := strings.Split(errMsg, ": ") + + return fmt.Errorf("netip.ParsePrefix: %s", errPieces[len(errPieces)-1]) + } + + return err +} + +func wrapTimeParseDurationError(err error) error { + if err == nil { + return nil + } + + errMsg := err.Error() + if strings.HasPrefix(errMsg, "time: unknown unit ") { + return errors.New("time: unknown unit") + } else if strings.HasPrefix(errMsg, "time: ") { + idx := strings.LastIndex(errMsg, " ") + + return errors.New(errMsg[:idx]) + } + + return err +} + +func wrapTimeParseLocationError(err error) error { + if err == nil { + return nil + } + errMsg := err.Error() + if strings.Contains(errMsg, "unknown time zone") || strings.HasPrefix(errMsg, "time: unknown format") { + return fmt.Errorf("invalid time zone format: %w", err) + } + + return err +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock new file mode 100644 index 00000000000..5e67bdd6b42 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.lock @@ -0,0 +1,294 @@ +{ + "nodes": { + "cachix": { + "inputs": { + "devenv": [ + "devenv" + ], + "flake-compat": [ + "devenv" + ], + "git-hooks": [ + "devenv" + ], + "nixpkgs": "nixpkgs" + }, + "locked": { + "lastModified": 1742042642, + "narHash": "sha256-D0gP8srrX0qj+wNYNPdtVJsQuFzIng3q43thnHXQ/es=", + "owner": "cachix", + "repo": "cachix", + "rev": 
"a624d3eaf4b1d225f918de8543ed739f2f574203", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "latest", + "repo": "cachix", + "type": "github" + } + }, + "devenv": { + "inputs": { + "cachix": "cachix", + "flake-compat": "flake-compat", + "git-hooks": "git-hooks", + "nix": "nix", + "nixpkgs": "nixpkgs_3" + }, + "locked": { + "lastModified": 1744876578, + "narHash": "sha256-8MTBj2REB8t29sIBLpxbR0+AEGJ7f+RkzZPAGsFd40c=", + "owner": "cachix", + "repo": "devenv", + "rev": "7ff7c351bba20d0615be25ecdcbcf79b57b85fe1", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1733328505, + "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": [ + "devenv", + "nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1712014858, + "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-parts_2": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1743550720, + "narHash": "sha256-hIshGgKZCgWh6AYJpJmRgFdR3WUbkY04o82X05xqQiY=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "c621e8422220273271f52058f618c94e405bb0f5", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "git-hooks": { + "inputs": { + "flake-compat": [ + "devenv" + ], + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1742649964, + "narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=", + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "git-hooks.nix", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + "git-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "libgit2": { + "flake": false, + "locked": { + "lastModified": 1697646580, + "narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=", + "owner": "libgit2", + "repo": "libgit2", + "rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5", + "type": "github" + }, + "original": { + "owner": "libgit2", + "repo": "libgit2", + "type": "github" + } + }, + "nix": { + "inputs": { + "flake-compat": [ + "devenv" + ], + "flake-parts": "flake-parts", + "libgit2": "libgit2", + "nixpkgs": "nixpkgs_2", + "nixpkgs-23-11": [ + "devenv" + ], + "nixpkgs-regression": [ + "devenv" + ], + "pre-commit-hooks": [ + "devenv" + ] + }, + "locked": { + "lastModified": 1741798497, + "narHash": "sha256-E3j+3MoY8Y96mG1dUIiLFm2tZmNbRvSiyN7CrSKuAVg=", + "owner": "domenkozar", + "repo": "nix", + "rev": 
"f3f44b2baaf6c4c6e179de8cbb1cc6db031083cd", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.24", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1733212471, + "narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "55d15ad12a74eb7d4646254e13638ad0c4128776", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "lastModified": 1743296961, + "narHash": "sha256-b1EdN3cULCqtorQ4QeWgLMrd5ZGOjLSLemfa00heasc=", + "owner": "nix-community", + "repo": "nixpkgs.lib", + "rev": "e4822aea2a6d1cdd36653c134cacfd64c97ff4fa", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nixpkgs.lib", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1717432640, + "narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "88269ab3044128b7c2f4c7d68448b2fb50456870", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "release-24.05", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1733477122, + "narHash": "sha256-qamMCz5mNpQmgBwc8SB5tVMlD5sbwVIToVZtSxMph9s=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "7bd9e84d0452f6d2e63b6e6da29fe73fac951857", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "nixpkgs_4": { + "locked": { + "lastModified": 1744536153, + "narHash": "sha256-awS2zRgF4uTwrOKwwiJcByDzDOdo3Q1rPZbiHQg/N38=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "18dd725c29603f582cf1900e0d25f9f1063dbf11", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "flake-parts": "flake-parts_2", + "nixpkgs": "nixpkgs_4" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.nix b/vendor/github.com/go-viper/mapstructure/v2/flake.nix new file mode 100644 index 00000000000..3b116f426d4 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.nix @@ -0,0 +1,46 @@ +{ + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + flake-parts.url = "github:hercules-ci/flake-parts"; + devenv.url = "github:cachix/devenv"; + }; + + outputs = + inputs@{ flake-parts, ... }: + flake-parts.lib.mkFlake { inherit inputs; } { + imports = [ + inputs.devenv.flakeModule + ]; + + systems = [ + "x86_64-linux" + "x86_64-darwin" + "aarch64-darwin" + ]; + + perSystem = + { pkgs, ... 
}: + rec { + devenv.shells = { + default = { + languages = { + go.enable = true; + }; + + pre-commit.hooks = { + nixpkgs-fmt.enable = true; + }; + + packages = with pkgs; [ + golangci-lint + ]; + + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; + + ci = devenv.shells.default; + }; + }; + }; +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go new file mode 100644 index 00000000000..d1c15e474f4 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go @@ -0,0 +1,11 @@ +package errors + +import "errors" + +func New(text string) error { + return errors.New(text) +} + +func As(err error, target interface{}) bool { + return errors.As(err, target) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go new file mode 100644 index 00000000000..d74e3a0b5a4 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go @@ -0,0 +1,9 @@ +//go:build go1.20 + +package errors + +import "errors" + +func Join(errs ...error) error { + return errors.Join(errs...) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go new file mode 100644 index 00000000000..700b40229cb --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go @@ -0,0 +1,61 @@ +//go:build !go1.20 + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors + +// Join returns an error that wraps the given errors. +// Any nil error values are discarded. +// Join returns nil if every value in errs is nil. +// The error formats as the concatenation of the strings obtained +// by calling the Error method of each element of errs, with a newline +// between each string. +// +// A non-nil error returned by Join implements the Unwrap() []error method. +func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + // Since Join returns nil if every value in errs is nil, + // e.errs cannot be empty. + if len(e.errs) == 1 { + return e.errs[0].Error() + } + + b := []byte(e.errs[0].Error()) + for _, err := range e.errs[1:] { + b = append(b, '\n') + b = append(b, err.Error()...) + } + // At this point, b has at least one byte '\n'. + // return unsafe.String(&b[0], len(b)) + return string(b) +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go new file mode 100644 index 00000000000..7c35bce0202 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go @@ -0,0 +1,1712 @@ +// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]any +// into a native Go structure. 
+// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// # Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// # Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// # Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]any{ +// "person": map[string]any{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]any{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]any{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// # Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]any" or "map[any]any". +// See example below: +// +// type Friend struct { +// Name string +// Other map[string]any `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]any{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// # Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value, or a zero-length element. The zero value of all types is +// specified in the Go specification. 
+//
+// For example, the zero value of a numeric type is zero ("0"). If the struct
+// field value is zero and a numeric type, the field is empty, and it won't
+// be encoded into the destination type. And likewise for the URLs field, if the
+// slice is nil or empty, it won't be encoded into the destination type.
+//
+//	type Source struct {
+//		Age  int      `mapstructure:",omitempty"`
+//		URLs []string `mapstructure:",omitempty"`
+//	}
+//
+// # Omit Zero Values
+//
+// When decoding from a struct to any other value, you may use the
+// ",omitzero" suffix on your tag to omit that value if it equates to the zero
+// value. The zero value of all types is specified in the Go specification.
+//
+// For example, the zero value of a numeric type is zero ("0"). If the struct
+// field value is zero and a numeric type, the field is empty, and it won't
+// be encoded into the destination type. And likewise for the URLs field, if the
+// slice is nil, it won't be encoded into the destination type.
+//
+// Note that if the field is a slice, and it is empty but not nil, it will
+// still be encoded into the destination type.
+//
+//	type Source struct {
+//		Age  int      `mapstructure:",omitzero"`
+//		URLs []string `mapstructure:",omitzero"`
+//	}
+//
+// # Unexported fields
+//
+// Since unexported (private) struct fields cannot be set outside the package
+// where they are defined, the decoder will simply skip them.
+//
+// For this output type definition:
+//
+//	type Exported struct {
+//		private string // this unexported field will be skipped
+//		Public  string
+//	}
+//
+// Using this map as input:
+//
+//	map[string]any{
+//		"private": "I will be ignored",
+//		"Public":  "I made it through!",
+//	}
+//
+// The following struct will be decoded:
+//
+//	type Exported struct {
+//		private: "" // field is left with an empty string (zero value)
+//		Public:  "I made it through!"
+//	}
+//
+// # Other Configuration
+//
+// mapstructure is highly configurable. See the DecoderConfig struct
+// for other features and options that are supported.
+package mapstructure
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/go-viper/mapstructure/v2/internal/errors"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or
+// DecodeHookFuncValue.
+// Values are a superset of Types (Values can return types), and Types are a
+// superset of Kinds (Types can return Kinds) and are generally a richer thing
+// to use, but Kinds are simpler if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc any
+
+// DecodeHookFuncType is a DecodeHookFunc which has complete information about
+// the source and target types.
+type DecodeHookFuncType func(reflect.Type, reflect.Type, any) (any, error)
+
+// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
+// source and target types.
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, any) (any, error)
+
+// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target
+// values.
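To tie the tag features documented above together, a small sketch (the `Base` and `Widget` types are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Base struct {
	Name string `mapstructure:"name"`
}

type Widget struct {
	Base  `mapstructure:",squash"` // Base's fields are read from the top level
	Size  int            `mapstructure:"size"`
	Extra map[string]any `mapstructure:",remain"` // collects unmapped keys
}

func main() {
	input := map[string]any{
		"name":  "gizmo",
		"size":  3,
		"color": "red", // no matching field: lands in Extra
	}

	var w Widget
	if err := mapstructure.Decode(input, &w); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", w) // {Base:{Name:gizmo} Size:3 Extra:map[color:red]}
}
```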
+type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (any, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. + // + // If an error is returned, the entire decode will fail with that error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + + // AllowUnsetPointer, if set to true, will prevent fields with pointer types + // from being reported as unset, even if ErrorUnset is true and the field was + // not present in the input data. This allows pointer fields to be optional + // without triggering an error when they are missing. + AllowUnsetPointer bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result any + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string + + // The option of the value in the tag that indicates a field should + // be squashed. This defaults to "squash". + SquashTagOption string + + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. 
+ IgnoreUntaggedFields bool + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool + + // DecodeNil, if set to true, will cause the DecodeHook (if present) to run + // even if the input is nil. This can be used to provide default values. + DecodeNil bool +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig + cachedDecodeHook func(from reflect.Value, to reflect.Value) (any, error) +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. +func Decode(input any, output any) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output any) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input any, output any, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input any, output any, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. 
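A quick sketch of the metadata-collecting convenience wrappers in action (the `Server` struct and input are illustrative); `Metadata.Unset` is likewise populated for fields such as `port` that receive no value:

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Server struct {
	Host string `mapstructure:"host"`
	Port int    `mapstructure:"port"`
}

func main() {
	input := map[string]any{
		"host":  "localhost",
		"debug": true, // no matching field in Server
	}

	var (
		s  Server
		md mapstructure.Metadata
	)
	if err := mapstructure.DecodeMetadata(input, &s, &md); err != nil {
		panic(err)
	}
	fmt.Printf("decoded: %+v\n", s)   // {Host:localhost Port:0}
	fmt.Println("keys:", md.Keys)     // fields that were set
	fmt.Println("unused:", md.Unused) // input keys with no target field: [debug]
}
```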
+func NewDecoder(config *DecoderConfig) (*Decoder, error) {
+	val := reflect.ValueOf(config.Result)
+	if val.Kind() != reflect.Ptr {
+		return nil, errors.New("result must be a pointer")
+	}
+
+	val = val.Elem()
+	if !val.CanAddr() {
+		return nil, errors.New("result must be addressable (a pointer)")
+	}
+
+	if config.Metadata != nil {
+		if config.Metadata.Keys == nil {
+			config.Metadata.Keys = make([]string, 0)
+		}
+
+		if config.Metadata.Unused == nil {
+			config.Metadata.Unused = make([]string, 0)
+		}
+
+		if config.Metadata.Unset == nil {
+			config.Metadata.Unset = make([]string, 0)
+		}
+	}
+
+	if config.TagName == "" {
+		config.TagName = "mapstructure"
+	}
+
+	if config.SquashTagOption == "" {
+		config.SquashTagOption = "squash"
+	}
+
+	if config.MatchName == nil {
+		config.MatchName = strings.EqualFold
+	}
+
+	result := &Decoder{
+		config: config,
+	}
+	if config.DecodeHook != nil {
+		result.cachedDecodeHook = cachedDecodeHook(config.DecodeHook)
+	}
+
+	return result, nil
+}
+
+// Decode decodes the given raw interface to the target pointer specified
+// by the configuration.
+func (d *Decoder) Decode(input any) error {
+	err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
+
+	// Retain some of the original behavior when multiple errors occur
+	var joinedErr interface{ Unwrap() []error }
+	if errors.As(err, &joinedErr) {
+		return fmt.Errorf("decoding failed due to the following error(s):\n\n%w", err)
+	}
+
+	return err
+}
+
+// isNil returns true if the input is nil or a typed nil pointer.
+func isNil(input any) bool {
+	if input == nil {
+		return true
+	}
+	val := reflect.ValueOf(input)
+	return val.Kind() == reflect.Ptr && val.IsNil()
+}
+
+// Decodes an unknown data type into a specific reflection value.
+func (d *Decoder) decode(name string, input any, outVal reflect.Value) error {
+	var (
+		inputVal   = reflect.ValueOf(input)
+		outputKind = getKind(outVal)
+		decodeNil  = d.config.DecodeNil && d.cachedDecodeHook != nil
+	)
+	if isNil(input) {
+		// Typed nils won't match the "input == nil" below, so reset input.
+		input = nil
+	}
+	if input == nil {
+		// If the data is nil, then we don't set anything, unless ZeroFields is set
+		// to true.
+		if d.config.ZeroFields {
+			outVal.Set(reflect.Zero(outVal.Type()))
+
+			if d.config.Metadata != nil && name != "" {
+				d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+			}
+		}
+		if !decodeNil {
+			return nil
+		}
+	}
+	if !inputVal.IsValid() {
+		if !decodeNil {
+			// If the input value is invalid, then we just set the value
+			// to be the zero value.
+			outVal.Set(reflect.Zero(outVal.Type()))
+			if d.config.Metadata != nil && name != "" {
+				d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+			}
+			return nil
+		}
+		// Hooks need a valid inputVal, so reset it to zero value of outVal type.
+		switch outputKind {
+		case reflect.Struct, reflect.Map:
+			var mapVal map[string]any
+			inputVal = reflect.ValueOf(mapVal) // create a typed nil map value
+		case reflect.Slice, reflect.Array:
+			var sliceVal []any
+			inputVal = reflect.ValueOf(sliceVal) // create a typed nil slice value
+		default:
+			inputVal = reflect.Zero(outVal.Type())
+		}
+	}
+
+	if d.cachedDecodeHook != nil {
+		// We have a DecodeHook, so let's pre-process the input.
+		var err error
+		input, err = d.cachedDecodeHook(inputVal, outVal)
+		if err != nil {
+			return newDecodeError(name, err)
+		}
+	}
+	if isNil(input) {
+		return nil
+	}
+
+	var err error
+	addMetaKey := true
+	switch outputKind {
+	case reflect.Bool:
+		err = d.decodeBool(name, input, outVal)
+	case reflect.Interface:
+		err = d.decodeBasic(name, input, outVal)
+	case reflect.String:
+		err = d.decodeString(name, input, outVal)
+	case reflect.Int:
+		err = d.decodeInt(name, input, outVal)
+	case reflect.Uint:
+		err = d.decodeUint(name, input, outVal)
+	case reflect.Float32:
+		err = d.decodeFloat(name, input, outVal)
+	case reflect.Complex64:
+		err = d.decodeComplex(name, input, outVal)
+	case reflect.Struct:
+		err = d.decodeStruct(name, input, outVal)
+	case reflect.Map:
+		err = d.decodeMap(name, input, outVal)
+	case reflect.Ptr:
+		addMetaKey, err = d.decodePtr(name, input, outVal)
+	case reflect.Slice:
+		err = d.decodeSlice(name, input, outVal)
+	case reflect.Array:
+		err = d.decodeArray(name, input, outVal)
+	case reflect.Func:
+		err = d.decodeFunc(name, input, outVal)
+	default:
+		// If we reached this point then we weren't able to decode it
+		return newDecodeError(name, fmt.Errorf("unsupported type: %s", outputKind))
+	}
+
+	// If we reached here, then we successfully decoded SOMETHING, so
+	// mark the key as used if we're tracking metadata.
+	if addMetaKey && d.config.Metadata != nil && name != "" {
+		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+	}
+
+	return err
+}
+
+// This decodes a basic type (bool, int, string, etc.) and sets the
+// value to "data" of that type.
+func (d *Decoder) decodeBasic(name string, data any, val reflect.Value) error {
+	if val.IsValid() && val.Elem().IsValid() {
+		elem := val.Elem()
+
+		// If we can't address this element, then it's not writable. Instead,
+		// we make a copy of the value (which is a pointer and therefore
+		// writable), decode into that, and replace the whole value.
+		copied := false
+		if !elem.CanAddr() {
+			copied = true
+
+			// Make *T
+			copy := reflect.New(elem.Type())
+
+			// *T = elem
+			copy.Elem().Set(elem)
+
+			// Set elem so we decode into it
+			elem = copy
+		}
+
+		// Decode. If we have an error then return. We also return right
+		// away if we're not a copy because that means we decoded directly.
+		if err := d.decode(name, data, elem); err != nil || !copied {
+			return err
+		}
+
+		// If we're a copy, we need to set the final result
+		val.Set(elem.Elem())
+		return nil
+	}
+
+	dataVal := reflect.ValueOf(data)
+
+	// If the input data is a pointer, and the assigned type is the dereference
+	// of that exact pointer, then indirect it so that we can assign it.
+ // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: wrapStrconvNumError(err), + }) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: err, + }) + } + val.SetInt(i) + default: + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: 
data, + Err: fmt.Errorf("%d overflows uint", i), + }) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: fmt.Errorf("%f overflows uint", f), + }) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: wrapStrconvNumError(err), + }) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := strconv.ParseUint(string(jn), 0, 64) + if err != nil { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: wrapStrconvNumError(err), + }) + } + val.SetUint(i) + default: + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: wrapStrconvNumError(err), + }) + } + default: + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: wrapStrconvNumError(err), + }) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return newDecodeError(name, &ParseError{ + Expected: val, + Value: data, + Err: err, + }) + } + val.SetFloat(i) + default: + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: 
data, + }) + } + + return nil +} + +func (d *Decoder) decodeComplex(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Complex64: + val.SetComplex(dataVal.Complex()) + default: + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data any, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + dataVal := reflect.ValueOf(data) + + // Resolve any levels of indirection + for dataVal.Kind() == reflect.Pointer { + dataVal = reflect.Indirect(dataVal) + } + + // Check input type and based on the input type jump to the proper func + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + name+"["+strconv.Itoa(i)+"]", + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + var errs []error + + // If the input data is empty, then we just match what the input data is. + if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := name + "[" + k.String() + "]" + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errs = append(errs, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errs = append(errs, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + return errors.Join(errs...) +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. 
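+		// (Note: reflect.StructField.PkgPath is non-empty only for
+		// unexported fields, which is what the check below relies on.)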
+		f := typ.Field(i)
+		if f.PkgPath != "" {
+			continue
+		}
+
+		// Next get the actual value of this field and verify it is assignable
+		// to the map value.
+		v := dataVal.Field(i)
+		if !v.Type().AssignableTo(valMap.Type().Elem()) {
+			return newDecodeError(
+				name+"."+f.Name,
+				fmt.Errorf("cannot assign type %q to map value field of type %q", v.Type(), valMap.Type().Elem()),
+			)
+		}
+
+		tagValue := f.Tag.Get(d.config.TagName)
+		keyName := f.Name
+
+		if tagValue == "" && d.config.IgnoreUntaggedFields {
+			continue
+		}
+
+		// If Squash is set in the config, we squash the field down.
+		squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous
+
+		v = dereferencePtrToStructIfNeeded(v, d.config.TagName)
+
+		// Determine the name of the key in the map
+		if index := strings.Index(tagValue, ","); index != -1 {
+			if tagValue[:index] == "-" {
+				continue
+			}
+			// If "omitempty" is specified in the tag, it ignores empty values.
+			if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) {
+				continue
+			}
+
+			// If "omitzero" is specified in the tag, it ignores zero values.
+			if strings.Index(tagValue[index+1:], "omitzero") != -1 && v.IsZero() {
+				continue
+			}
+
+			// If "squash" is specified in the tag, we squash the field down.
+			squash = squash || strings.Contains(tagValue[index+1:], d.config.SquashTagOption)
+			if squash {
+				// When squashing, the embedded type can be a pointer to a struct.
+				if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
+					v = v.Elem()
+				}
+
+				// The final type must be a struct
+				if v.Kind() != reflect.Struct {
+					return newDecodeError(
+						name+"."+f.Name,
+						fmt.Errorf("cannot squash non-struct type %q", v.Type()),
+					)
+				}
+			} else {
+				if strings.Index(tagValue[index+1:], "remain") != -1 {
+					if v.Kind() != reflect.Map {
+						return newDecodeError(
+							name+"."+f.Name,
+							fmt.Errorf("error remain-tag field with invalid type: %q", v.Type()),
+						)
+					}
+
+					ptr := v.MapRange()
+					for ptr.Next() {
+						valMap.SetMapIndex(ptr.Key(), ptr.Value())
+					}
+					continue
+				}
+			}
+			if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" {
+				keyName = keyNameTagValue
+			}
+		} else if len(tagValue) > 0 {
+			if tagValue == "-" {
+				continue
+			}
+			keyName = tagValue
+		}
+
+		switch v.Kind() {
+		// this is an embedded struct, so handle it differently
+		case reflect.Struct:
+			x := reflect.New(v.Type())
+			x.Elem().Set(v)
+
+			vType := valMap.Type()
+			vKeyType := vType.Key()
+			vElemType := vType.Elem()
+			mType := reflect.MapOf(vKeyType, vElemType)
+			vMap := reflect.MakeMap(mType)
+
+			// Creating a pointer to a map so that other methods can completely
+			// overwrite the map if need be (looking at you decodeMapFromMap). The
+			// indirection allows the underlying map to be settable (CanSet() == true)
+			// whereas reflect.MakeMap returns an unsettable map.
+			addrVal := reflect.New(vMap.Type())
+			reflect.Indirect(addrVal).Set(vMap)
+
+			err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal))
+			if err != nil {
+				return err
+			}
+
+			// the underlying map may have been completely overwritten so pull
+			// it indirectly out of the enclosing value.
+ vMap = reflect.Indirect(addrVal) + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data any, val reflect.Value) (bool, error) { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return true, nil + } + + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return false, err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return false, err + } + } + return false, nil +} + +func (d *Decoder) decodeFunc(name string, data any, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return newDecodeError(name, &UnconvertibleTypeError{ + Expected: val, + Value: data, + }) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // If we have a non array/slice type then we first attempt to convert. + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []any{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []any{data}, val) + } + } + + return newDecodeError(name, + fmt.Errorf("source data must be an array or slice, got %s", dataValKind)) + } + + // If the input value is nil, then don't allocate since empty != nil + if dataValKind != reflect.Array && dataVal.IsNil() { + return nil + } + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Make a new slice to hold our result, same size as the original data. 
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } else if valSlice.Len() > dataVal.Len() { + valSlice = valSlice.Slice(0, dataVal.Len()) + } + + // Accumulate any errors + var errs []error + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errs = append(errs, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + return errors.Join(errs...) +} + +func (d *Decoder) decodeArray(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if isComparable(valArray) && valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []any{data}, val) + } + } + + return newDecodeError(name, + fmt.Errorf("source data must be an array or slice, got %s", dataValKind)) + + } + if dataVal.Len() > arrayType.Len() { + return newDecodeError(name, + fmt.Errorf("expected source data to have length less or equal to %d, got %d", arrayType.Len(), dataVal.Len())) + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + var errs []error + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errs = append(errs, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + return errors.Join(errs...) +} + +func (d *Decoder) decodeStruct(name string, data any, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. + + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]any)(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). 
The
+		// indirection allows the underlying map to be settable (CanSet() == true)
+		// whereas reflect.MakeMap returns an unsettable map.
+		addrVal := reflect.New(mval.Type())
+
+		reflect.Indirect(addrVal).Set(mval)
+		if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil {
+			return err
+		}
+
+		result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val)
+		return result
+
+	default:
+		return newDecodeError(name,
+			fmt.Errorf("expected a map or struct, got %q", dataValKind))
+	}
+}
+
+func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
+	dataValType := dataVal.Type()
+	if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
+		return newDecodeError(name,
+			fmt.Errorf("needs a map with string keys, has %q keys", kind))
+	}
+
+	dataValKeys := make(map[reflect.Value]struct{})
+	dataValKeysUnused := make(map[any]struct{})
+	for _, dataValKey := range dataVal.MapKeys() {
+		dataValKeys[dataValKey] = struct{}{}
+		dataValKeysUnused[dataValKey.Interface()] = struct{}{}
+	}
+
+	targetValKeysUnused := make(map[any]struct{})
+
+	var errs []error
+
+	// This slice will keep track of all the structs we'll be decoding.
+	// There can be more than one struct if there are embedded structs
+	// that are squashed.
+	structs := make([]reflect.Value, 1, 5)
+	structs[0] = val
+
+	// Compile the list of all the fields that we're going to be decoding
+	// from all the structs.
+	type field struct {
+		field reflect.StructField
+		val   reflect.Value
+	}
+
+	// remainField is set to a valid field set with the "remain" tag if
+	// we are keeping track of remaining values.
+	var remainField *field
+
+	fields := []field{}
+	for len(structs) > 0 {
+		structVal := structs[0]
+		structs = structs[1:]
+
+		structType := structVal.Type()
+
+		for i := 0; i < structType.NumField(); i++ {
+			fieldType := structType.Field(i)
+			fieldVal := structVal.Field(i)
+			if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct {
+				// Handle embedded struct pointers as embedded structs.
+				fieldVal = fieldVal.Elem()
+			}
+
+			// If "squash" is specified in the tag, we squash the field down.
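+			// (Illustrative: with the default tag name, an embedded field such as
+			//	Base `mapstructure:",squash"`
+			// has its inner fields decoded as if they were declared inline.)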
+			squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous
+			remain := false
+
+			// We always parse the tags because we're looking for other tags too
+			tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
+			for _, tag := range tagParts[1:] {
+				if tag == d.config.SquashTagOption {
+					squash = true
+					break
+				}
+
+				if tag == "remain" {
+					remain = true
+					break
+				}
+			}
+
+			if squash {
+				switch fieldVal.Kind() {
+				case reflect.Struct:
+					structs = append(structs, fieldVal)
+				case reflect.Interface:
+					if !fieldVal.IsNil() {
+						structs = append(structs, fieldVal.Elem().Elem())
+					}
+				default:
+					errs = append(errs, newDecodeError(
+						name+"."+fieldType.Name,
+						fmt.Errorf("unsupported type for squash: %s", fieldVal.Kind()),
+					))
+				}
+				continue
+			}
+
+			// Build our field
+			if remain {
+				remainField = &field{fieldType, fieldVal}
+			} else {
+				// Normal struct field, store it away
+				fields = append(fields, field{fieldType, fieldVal})
+			}
+		}
+	}
+
+	// for fieldType, field := range fields {
+	for _, f := range fields {
+		field, fieldValue := f.field, f.val
+		fieldName := field.Name
+
+		tagValue := field.Tag.Get(d.config.TagName)
+		if tagValue == "" && d.config.IgnoreUntaggedFields {
+			continue
+		}
+		tagValue = strings.SplitN(tagValue, ",", 2)[0]
+		if tagValue != "" {
+			fieldName = tagValue
+		}
+
+		rawMapKey := reflect.ValueOf(fieldName)
+		rawMapVal := dataVal.MapIndex(rawMapKey)
+		if !rawMapVal.IsValid() {
+			// Do a slower search by iterating over each key and
+			// doing case-insensitive search.
+			for dataValKey := range dataValKeys {
+				mK, ok := dataValKey.Interface().(string)
+				if !ok {
+					// Not a string key
+					continue
+				}
+
+				if d.config.MatchName(mK, fieldName) {
+					rawMapKey = dataValKey
+					rawMapVal = dataVal.MapIndex(dataValKey)
+					break
+				}
+			}
+
+			if !rawMapVal.IsValid() {
+				// There was no matching key in the map for the value in
+				// the struct. Remember it for potential errors and metadata.
+				if !(d.config.AllowUnsetPointer && fieldValue.Kind() == reflect.Ptr) {
+					targetValKeysUnused[fieldName] = struct{}{}
+				}
+				continue
+			}
+		}
+
+		if !fieldValue.IsValid() {
+			// This should never happen
+			panic("field is not valid")
+		}
+
+		// If we can't set the field, then it is unexported or something,
+		// and we just continue onwards.
+		if !fieldValue.CanSet() {
+			continue
+		}
+
+		// Delete the key we're using from the unused map so we stop tracking
+		delete(dataValKeysUnused, rawMapKey.Interface())
+
+		// If the name is empty string, then we're at the root, and we
+		// don't dot-join the fields.
+		if name != "" {
+			fieldName = name + "." + fieldName
+		}
+
+		if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	// If we have a "remain"-tagged field and we have unused keys then
+	// we put the unused keys directly into the remain field.
+	if remainField != nil && len(dataValKeysUnused) > 0 {
+		// Build a map of only the unused values
+		remain := map[any]any{}
+		for key := range dataValKeysUnused {
+			remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface()
+		}
+
+		// Decode it as if we were just decoding this map onto our map.
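+		// (Illustrative: a field declared as
+		//	Rest map[string]any `mapstructure:",remain"`
+		// collects every input key that no other field consumed.)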
+ if err := d.decodeMap(name, remain, remainField.val); err != nil { + errs = append(errs, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + errs = append(errs, newDecodeError( + name, + fmt.Errorf("has invalid keys: %s", strings.Join(keys, ", ")), + )) + } + + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + errs = append(errs, newDecodeError( + name, + fmt.Errorf("has unset fields: %s", strings.Join(keys, ", ")), + )) + } + + if err := errors.Join(errs...); err != nil { + return err + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } + } + + return nil +} + +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + case kind >= reflect.Complex64 && kind <= reflect.Complex128: + return reflect.Complex64 + default: + return kind + } +} + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go new file mode 100644 index 00000000000..d0913fff6c7 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go @@ -0,0 +1,44 @@ +//go:build !go1.20 + +package mapstructure + +import "reflect" + 
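+// isComparable reports whether v can safely be compared with ==. It
+// backports the behavior of reflect.Value.Comparable, which was added in
+// Go 1.20; the go1.20 build of this file (below) simply delegates to that
+// standard library method.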
+func isComparable(v reflect.Value) bool {
+	k := v.Kind()
+	switch k {
+	case reflect.Invalid:
+		return false
+
+	case reflect.Array:
+		switch v.Type().Elem().Kind() {
+		case reflect.Interface, reflect.Array, reflect.Struct:
+			for i := 0; i < v.Type().Len(); i++ {
+				// if !v.Index(i).Comparable() {
+				if !isComparable(v.Index(i)) {
+					return false
+				}
+			}
+			return true
+		}
+		return v.Type().Comparable()
+
+	case reflect.Interface:
+		// return v.Elem().Comparable()
+		return isComparable(v.Elem())
+
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			// if !v.Field(i).Comparable() {
+			if !isComparable(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+
+	default:
+		return v.Type().Comparable()
+	}
+}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
new file mode 100644
index 00000000000..f8255a1b174
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
@@ -0,0 +1,10 @@
+//go:build go1.20
+
+package mapstructure
+
+import "reflect"
+
+// TODO: remove once we drop support for Go <1.20
+func isComparable(v reflect.Value) bool {
+	return v.Comparable()
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/.gitignore b/vendor/github.com/google/certificate-transparency-go/.gitignore
new file mode 100644
index 00000000000..8c13cd1c9d3
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/.gitignore
@@ -0,0 +1,28 @@
+*.iml
+*.swo
+*.swp
+*.tfstate
+*.tfstate.backup
+*~
+/.idea
+/certcheck
+/chainfix
+/coverage.txt
+/createtree
+/crlcheck
+/ctclient
+/ct_server
+/ct_hammer
+/data
+/dumpscts
+/findlog
+/goshawk
+/gosmin
+/gossip_server
+/preloader
+/scanlog
+/sctcheck
+/sctscan
+/trillian_log_server
+/trillian_log_signer
+/trillian.json
diff --git a/vendor/github.com/google/certificate-transparency-go/.golangci.yaml b/vendor/github.com/google/certificate-transparency-go/.golangci.yaml
new file mode 100644
index 00000000000..e9b683b2bd1
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/.golangci.yaml
@@ -0,0 +1,39 @@
+version: "2"
+linters:
+  settings:
+    depguard:
+      rules:
+        main:
+          deny:
+            - pkg: ^golang.org/x/net/context$
+            - pkg: github.com/gogo/protobuf/proto
+            - pkg: encoding/asn1
+            - pkg: crypto/x509
+    gocyclo:
+      min-complexity: 25
+  exclusions:
+    generated: lax
+    rules:
+      - linters:
+          - staticcheck
+        text: 'SA1019: grpc.Dial is deprecated: use NewClient instead'
+      - linters:
+          - staticcheck
+        text: 'SA1019: grpc.DialContext is deprecated: use NewClient instead'
+      - linters:
+          - staticcheck
+        text: 'SA1019: grpc.WithBlock is deprecated: this DialOption is not supported by NewClient'
+    paths:
+      - (^|/)x509($|/)
+      - (^|/)x509util($|/)
+      - (^|/)asn1($|/)
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
diff --git a/vendor/github.com/google/certificate-transparency-go/AUTHORS b/vendor/github.com/google/certificate-transparency-go/AUTHORS
new file mode 100644
index 00000000000..ad514665ef6
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/AUTHORS
@@ -0,0 +1,29 @@
+# This is the official list of authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+#
+# Names should be added to this file as:
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+#
+# Please keep the list sorted.
+
+Alex Cohn
+Ed Maste
+Elisha Silas
+Fiaz Hossain
+Google LLC
+Internet Security Research Group
+Jeff Trawick
+Katriel Cohn-Gordon
+Laël Cellier
+Mark Schloesser
+NORDUnet A/S
+Nicholas Galbreath
+Oliver Weidner
+PrimeKey Solutions AB
+Ruslan Kovalov
+Sectigo Limited
+Venafi, Inc.
+Vladimir Rutsky
+Ximin Luo
diff --git a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
new file mode 100644
index 00000000000..0206cfe1248
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
@@ -0,0 +1,1311 @@
+# CERTIFICATE-TRANSPARENCY-GO Changelog
+
+## HEAD
+
+## v1.3.2
+
+### Misc
+
+* [migrillian] remove etcd support in #1699
+* Bump golangci-lint from 1.55.1 to 1.61.0 (developers should update to this version).
+* Update ctclient tool to support SCT extensions field by @liweitianux in https://github.com/google/certificate-transparency-go/pull/1645
+* Bump go to 1.23
+* [ct_hammer] support HTTPS and Bearer token for Authentication.
+* [preloader] support Bearer token Authentication for non temporal logs.
+* [preloader] support end indexes
+* [CTFE] Short cache max-age when get-entries returns fewer entries than requested by @robstradling in https://github.com/google/certificate-transparency-go/pull/1707
+* [CTFE] Disallow mismatching signature algorithm identifiers in #702.
+* [jsonclient] surface HTTP Do and Read errors #1695 by @FiloSottile
+
+### CTFE Storage Saving: Extra Data Issuance Chain Deduplication
+
+* Suppress unnecessary duplicate key errors in the IssuanceChainStorage PostgreSQL implementation by @robstradling in https://github.com/google/certificate-transparency-go/pull/1678
+* Only store IssuanceChain if not cached by @robstradling in https://github.com/google/certificate-transparency-go/pull/1679
+
+### CTFE Rate Limiting Of Non-Fresh Submissions
+
+To protect a log from being flooded with requests for "old" certificates, optional rate limiting for "non-fresh submissions" can be configured by providing the following flags:
+
+- `non_fresh_submission_age`
+- `non_fresh_submission_burst`
+- `non_fresh_submission_limit`
+
+This can help to ensure that the log maintains its ability to (1) accept "fresh" submissions and (2) distribute all log entries to monitors.
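+
+For example, an illustrative invocation (the flag names are from this
+release; the values shown are hypothetical):
+
+```
+ct_server \
+  --non_fresh_submission_age=24h \
+  --non_fresh_submission_limit=10 \
+  --non_fresh_submission_burst=20
+```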
+ +* [CTFE] Configurable mechanism to rate-limit non-fresh submissions by @robstradling in https://github.com/google/certificate-transparency-go/pull/1698 + +### Dependency updates + +* Bump the docker-deps group across 5 directories with 3 updates (#1705) +* Bump google.golang.org/grpc from 1.72.1 to 1.72.2 in the all-deps group (#1704) +* Bump github.com/go-jose/go-jose/v4 in the go_modules group (#1700) +* Bump the all-deps group with 7 updates (#1701) +* Bump the all-deps group with 7 updates (#1693) +* Bump the docker-deps group across 4 directories with 1 update (#1694) +* Bump github/codeql-action from 3.28.13 to 3.28.16 in the all-deps group (#1692) +* Bump the all-deps group across 1 directory with 7 updates (#1688) +* Bump distroless/base-debian12 (#1686) +* Bump golangci/golangci-lint-action from 6.5.1 to 7.0.0 in the all-deps group (#1685) +* Bump the all-deps group with 4 updates (#1681) +* Bump the all-deps group with 6 updates (#1683) +* Bump the docker-deps group across 4 directories with 2 updates (#1682) +* Bump github.com/golang-jwt/jwt/v4 in the go_modules group (#1680) +* Bump golangci/golangci-lint-action in the all-deps group (#1676) +* Bump the all-deps group with 2 updates (#1677) +* Bump github/codeql-action from 3.28.10 to 3.28.11 in the all-deps group (#1670) +* Bump the all-deps group with 8 updates (#1672) +* Bump the docker-deps group across 4 directories with 1 update (#1671) +* Bump the docker-deps group across 4 directories with 1 update (#1668) +* Bump the all-deps group with 4 updates (#1666) +* Bump golangci-lint from 1.55.1 to 1.61.0 (#1667) +* Bump the all-deps group with 3 updates (#1665) +* Bump github.com/spf13/cobra from 1.8.1 to 1.9.1 in the all-deps group (#1660) +* Bump the docker-deps group across 5 directories with 2 updates (#1661) +* Bump golangci/golangci-lint-action in the all-deps group (#1662) +* Bump the docker-deps group across 4 directories with 1 update (#1656) +* Bump the all-deps group with 2 updates (#1654) +* Bump the all-deps group with 4 updates (#1657) +* Bump github/codeql-action from 3.28.5 to 3.28.8 in the all-deps group (#1652) +* Bump github.com/spf13/pflag from 1.0.5 to 1.0.6 in the all-deps group (#1651) +* Bump the all-deps group with 2 updates (#1649) +* Bump the all-deps group with 5 updates (#1650) +* Bump the docker-deps group across 5 directories with 3 updates (#1648) +* Bump google.golang.org/protobuf in the all-deps group (#1647) +* Bump golangci/golangci-lint-action in the all-deps group (#1646) + +## v1.3.1 + +* Add AllLogListSignatureURL by @AlexLaroche in https://github.com/google/certificate-transparency-go/pull/1634 +* Add TiledLogs to log list JSON by @mcpherrinm in https://github.com/google/certificate-transparency-go/pull/1635 +* chore: relax go directive to permit 1.22.x by @dnwe in https://github.com/google/certificate-transparency-go/pull/1640 + +### Dependency Update + +* Bump github.com/fullstorydev/grpcurl from 1.9.1 to 1.9.2 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1627 +* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1628 +* Bump the docker-deps group across 5 directories with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1630 +* Bump github/codeql-action from 3.27.5 to 3.27.6 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1629 +* Bump golang.org/x/crypto from 0.30.0 to 0.31.0 
in the go_modules group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1631 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1633 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1632 +* Bump the docker-deps group across 4 directories with 1 update by @dependabot in https://github.com/google/certificate-transparency-go/pull/1638 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1637 +* Bump the all-deps group across 1 directory with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1641 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1643 +* Bump google.golang.org/grpc from 1.69.2 to 1.69.4 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1642 + +## v1.3.0 + +### CTFE Storage Saving: Extra Data Issuance Chain Deduplication + +This feature now supports PostgreSQL, in addition to the support for MySQL/MariaDB that was added in [v1.2.0](#v1.2.0). + +Log operators can choose to enable this feature for new PostgreSQL-based CT logs by adding new CTFE configs in the [LogMultiConfig](trillian/ctfe/configpb/config.proto) and importing the [database schema](trillian/ctfe/storage/postgresql/schema.sql). The other available options are documented in the [v1.2.0](#v1.2.0) changelog entry. + +This change is tested in Cloud Build tests using the `postgres:17` Docker image as of the time of writing. + +* Add IssuanceChainStorage PostgreSQL implementation by @robstradling in https://github.com/google/certificate-transparency-go/pull/1618 + +### Misc + +* [Dependabot] Update all docker images in one PR by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1614 +* Explicitly include version tag by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1617 +* Add empty cloudbuild_postgresql.yaml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1623 + +### Dependency update + +* Bump the all-deps group with 4 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1609 +* Bump golang from 1.23.2-bookworm to 1.23.3-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1611 +* Bump github/codeql-action from 3.27.0 to 3.27.1 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1610 +* Bump golang from 1.23.2-bookworm to 1.23.3-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1612 +* Bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 in the go_modules group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1613 +* Bump the docker-deps group across 3 directories with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1616 +* Bump github/codeql-action from 3.27.1 to 3.27.2 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1615 +* Bump the docker-deps group across 4 directories with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1622 +* Bump 
github/codeql-action from 3.27.2 to 3.27.4 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1620
+* Bump the all-deps group with 4 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1621
+* Bump github.com/google/trillian from 1.6.1 to 1.7.0 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1624
+* Bump github/codeql-action from 3.27.4 to 3.27.5 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1625
+
+## v1.2.2
+
+* Recommended Go version for development: 1.22
+  * Using a different version can lead to presubmits failing due to unexpected diffs.
+
+### Add TLS Support
+
+Add TLS support for Trillian: by using the `--trillian_tls_ca_cert_file` flag, users can provide a CA certificate that is used to establish secure communication with the Trillian log server.
+
+Add TLS support for ct_server: by using the `--tls_certificate` and `--tls_key` flags, users can provide a service certificate and key, which enables the server to handle HTTPS requests.
+
+* Add TLS support for CTLog server by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1523
+* Add TLS support for migrillian by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1525
+* fix TLS configuration for ct_server by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1542
+* Add Trillian TLS support for ct_server by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1551
+
+### HTTP Idle Connection Timeout Flag
+
+A new flag `http_idle_timeout` is added to set the HTTP server's idle timeout value in the ct_server binary. This controls the maximum amount of time to wait for the next request when keep-alives are enabled.
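+
+For example (an illustrative invocation; the timeout value shown is
+hypothetical):
+
+```
+ct_server --http_idle_timeout=90s
+```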
+ +* add flag for HTTP idle connection timeout value by @bobcallaway in https://github.com/google/certificate-transparency-go/pull/1597 + +### Misc + +* Refactor issuance chain service by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1512 +* Use the version in the go.mod file for vuln checks by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1528 + +### Fixes + +* Fix failed tests on 32-bit OS by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1540 + +### Dependency update + +* Bump go.etcd.io/etcd/v3 from 3.5.13 to 3.5.14 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1500 +* Bump github/codeql-action from 3.25.6 to 3.25.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1501 +* Bump golang.org/x/net from 0.25.0 to 0.26.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1503 +* Group dependabot updates as much as possible by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1506 +* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1507 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1511 +* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1510 +* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1509 +* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1508 +* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1516 +* Bump golang from `aec4784` to `9678844` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1518 +* Bump alpine from 3.19 to 3.20 in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1492 +* Bump golang from `aec4784` to `9678844` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1517 +* Bump golang from `aec4784` to `9678844` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1513 +* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1515 +* Bump golang from `aec4784` to `9678844` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1514 +* Bump alpine from `77726ef` to `b89d9c9` in /trillian/examples/deployment/docker/envsubst in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1519 +* Bump k8s.io/klog/v2 from 2.130.0 to 2.130.1 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1521 +* Bump alpine from `77726ef` to `b89d9c9` in /internal/witness/cmd/feeder in the all-deps group by @dependabot 
in https://github.com/google/certificate-transparency-go/pull/1520
+* Bump github/codeql-action from 3.25.10 to 3.25.11 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1526
+* Bump version of go used by the vuln checker by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1527
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1530
+* Bump golang from 1.22.4-bookworm to 1.22.5-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1531
+* Bump golang from 1.22.4-bookworm to 1.22.5-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1532
+* Bump the all-deps group in /trillian/examples/deployment/docker/ctfe with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1533
+* Bump actions/upload-artifact from 4.3.3 to 4.3.4 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1534
+* Bump golang from 1.22.4-bookworm to 1.22.5-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1535
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1536
+* Bump github/codeql-action from 3.25.12 to 3.25.13 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1538
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1537
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1543
+* Bump golang from `6c27802` to `af9b40f` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1544
+* Bump golang from `6c27802` to `af9b40f` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1548
+* Bump golang from `6c27802` to `af9b40f` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1547
+* Bump alpine from `b89d9c9` to `0a4eaa0` in /trillian/examples/deployment/docker/envsubst in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1546
+* Bump the all-deps group in /internal/witness/cmd/feeder with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1545
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1549
+* Bump golang.org/x/time from 0.5.0 to 0.6.0 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1550
+* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1552
+* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1553
+* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1554
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1555
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1556
+* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1557
+* Bump github.com/prometheus/client_golang from 1.19.1 to 1.20.0 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1559
+* Bump github/codeql-action from 3.26.0 to 3.26.3 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1561
+* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1558
+* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1563
+* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1560
+* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1562
+* Bump go version to 1.22.6 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1564
+* Bump github.com/prometheus/client_golang from 1.20.0 to 1.20.2 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1565
+* Bump github/codeql-action from 3.26.3 to 3.26.5 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1566
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1568
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1569
+* Bump go from 1.22.6 to 1.22.7 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1574
+* Bump alpine from `0a4eaa0` to `beefdbd` in /trillian/examples/deployment/docker/envsubst in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1571
+* Bump the all-deps group across 1 directory with 5 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1577
+* Bump golang from 1.23.0-bookworm to 1.23.1-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1575
+* Bump golang from 1.23.0-bookworm to 1.23.1-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1576
+* Bump the all-deps group in /trillian/examples/deployment/docker/ctfe with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1572
+* Bump the all-deps group in /internal/witness/cmd/feeder with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1573
+* Bump the all-deps group with 4 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1578
+* Bump github/codeql-action from 3.26.6 to 3.26.7 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1579
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1580
+* Bump github/codeql-action from 3.26.7 to 3.26.8 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1581
+* Bump distroless/base-debian12 from `c925d12` to `88e0a2a` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1582
+* Bump the all-deps group in /trillian/examples/deployment/docker/ctfe with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1585
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1583
+* Bump golang from `1a5326b` to `dba79eb` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1584
+* Bump golang from `1a5326b` to `dba79eb` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1587
+* Bump golang from `1a5326b` to `dba79eb` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1586
+* Bump the all-deps group with 5 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1588
+* Bump the all-deps group with 6 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1589
+* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1593
+* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1592
+* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1591
+* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1590
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1595
+* Bump github.com/prometheus/client_golang from 1.20.4 to 1.20.5 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1598
+* Bump golang from `18d2f94` to `2341ddf` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1602
+* Bump golang from `18d2f94` to `2341ddf` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1599
+* Bump golang from `18d2f94` to `2341ddf` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1600
+* Bump golang from `18d2f94` to `2341ddf` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1601
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1603
+* Bump distroless/base-debian12 from `6ae5fe6` to `8fe31fb` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1604
+
+## v1.2.1
+
+### Fixes
+
+* Fix Go potential bugs and maintainability by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1496
+
+### Dependency update
+
+* Bump google.golang.org/grpc from 1.63.2 to 1.64.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1482
+
+## v1.2.0
+
+### CTFE Storage Saving: Extra Data Issuance Chain Deduplication
+
+This feature reduces CT/Trillian database storage by deduplicating the entire issuance chain (intermediate certificate(s) and root certificate) that is currently stored in the Trillian Merkle tree leaf ExtraData field. Storage cost should be reduced by at least 33% for new CT logs with this feature enabled. Currently only MySQL/MariaDB is supported for storing the issuance chain in the CTFE database.
+
+Existing logs are not affected by this change.
+
+Log operators can choose to opt in to this change for new CT logs by adding new CTFE configs in the [LogMultiConfig](trillian/ctfe/configpb/config.proto) and importing the [database schema](trillian/ctfe/storage/mysql/schema.sql). See [example](trillian/examples/deployment/docker/ctfe/ct_server.cfg).
+
+- `ctfe_storage_connection_string`
+- `extra_data_issuance_chain_storage_backend`
+
+An optional LRU cache can be enabled by providing the following flags.
+
+- `cache_type`
+- `cache_size`
+- `cache_ttl`
+
+This change is tested in Cloud Build tests using the `mysql:8.4` Docker image as of the time of writing.
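+
+As a rough sketch of how these options fit together (hedged: the option names come from the lists above, but the values, the enum constant, and their exact placement are assumptions — the linked example config is authoritative):
+
+```
+# LogMultiConfig (textproto): issuance chain storage settings
+ctfe_storage_connection_string: "ctfe:secret@tcp(mysql:3306)/ctfedb"  # assumed DSN format
+extra_data_issuance_chain_storage_backend: ISSUANCE_CHAIN_STORAGE_BACKEND_MYSQL  # assumed enum name
+
+# Optional LRU cache, supplied as ct_server flags (values illustrative):
+#   --cache_type=lru --cache_size=10000 --cache_ttl=1h
+```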
+
+* Add issuance chain storage interface by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1430
+* Add issuance chain cache interface by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1431
+* Add CTFE extra data storage saving configs to config.proto by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1432
+* Add new types `PrecertChainEntryHash` and `CertificateChainHash` for TLS marshal/unmarshal in storage saving by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1435
+* Add IssuanceChainCache LRU implementation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1454
+* Add issuance chain service by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1452
+* Add CTFE extra data storage saving configs validation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1456
+* Add IssuanceChainStorage MySQL implementation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1462
+* Fix errcheck lint in mysql test by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1464
+* CTFE Extra Data Issuance Chain Deduplication by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1477
+* Fix incorrect deployment doc and server config by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1494
+
+### Submission proxy: Root compatibility checking
+
+* Adds the ability for a CT client to disable root compatibility checking by @aaomidi in https://github.com/google/certificate-transparency-go/pull/1258
+
+### Fixes
+
+* Return 429 Too Many Requests for gRPC error code `ResourceExhausted` from Trillian by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1401
+* Safeguard against redirects on PUT request by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1418
+* Fix CT client upload to be safe against no-op POSTs by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1424
+
+### Misc
+
+* Prefix errors.New variables with the word "Err" by @aaomidi in https://github.com/google/certificate-transparency-go/pull/1399
+* Remove lint exceptions and fix remaining issues by @silaselisha in https://github.com/google/certificate-transparency-go/pull/1438
+* Fix invalid Go toolchain version by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1471
+* Regenerate proto files by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1489
+
+### Dependency update
+
+* Bump distroless/base-debian12 from `5eae9ef` to `28a7f1f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1388
+* Bump github/codeql-action from 3.24.6 to 3.24.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1389
+* Bump actions/checkout from 4.1.1 to 4.1.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1390
+* Bump golang from `6699d28` to `7f9c058` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1391
+* Bump golang from `6699d28` to `7f9c058` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1392
+* Bump golang from `6699d28` to `7a392a2` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1393
+* Bump golang from `6699d28` to `7a392a2` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1394
+* Bump golang from `7a392a2` to `d996c64` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1395
+* Bump golang from `7f9c058` to `d996c64` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1396
+* Bump golang from `7a392a2` to `d996c64` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1397
+* Bump golang from `7f9c058` to `d996c64` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1398
+* Bump github/codeql-action from 3.24.7 to 3.24.8 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1400
+* Bump github/codeql-action from 3.24.8 to 3.24.9 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1402
+* Bump go.etcd.io/etcd/v3 from 3.5.12 to 3.5.13 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1405
+* Bump distroless/base-debian12 from `28a7f1f` to `611d30d` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1406
+* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1407
+* Bump golang.org/x/net from 0.22.0 to 0.23.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1408
+* update govulncheck go version from 1.21.8 to 1.21.9 by @phbnf in https://github.com/google/certificate-transparency-go/pull/1412
+* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1409
+* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1410
+* Bump golang.org/x/crypto from 0.21.0 to 0.22.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1414
+* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1411
+* Bump github/codeql-action from 3.24.9 to 3.24.10 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1415
+* Bump golang.org/x/net from 0.23.0 to 0.24.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1416
+* Bump google.golang.org/grpc from 1.62.1 to 1.63.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1417
+* Bump github.com/fullstorydev/grpcurl from 1.8.9 to 1.9.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1419
+* Bump golang from `48b942a` to `3451eec` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1421
+* Bump golang from `48b942a` to `3451eec` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1423
+* Bump golang from `48b942a` to `3451eec` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1420
+* Bump golang from `3451eec` to `b03f3ba` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1426
+* Bump golang from `3451eec` to `b03f3ba` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1425
+* Bump golang from `48b942a` to `3451eec` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1422
+* Bump golang from `3451eec` to `b03f3ba` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1427
+* Bump golang from `3451eec` to `b03f3ba` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1428
+* Bump github/codeql-action from 3.24.10 to 3.25.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1433
+* Bump github/codeql-action from 3.25.0 to 3.25.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1434
+* Bump actions/upload-artifact from 4.3.1 to 4.3.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1436
+* Bump actions/checkout from 4.1.2 to 4.1.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1437
+* Bump actions/upload-artifact from 4.3.2 to 4.3.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1440
+* Bump github/codeql-action from 3.25.1 to 3.25.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1441
+* Bump golang from `b03f3ba` to `d0902ba` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1444
+* Bump golang from `b03f3ba` to `d0902ba` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1443
+* Bump github.com/rs/cors from 1.10.1 to 1.11.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1442
+* Bump golang from `b03f3ba` to `d0902ba` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1447
+* Bump actions/checkout from 4.1.3 to 4.1.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1446
+* Bump github/codeql-action from 3.25.2 to 3.25.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1449
+* Bump golangci/golangci-lint-action from 4.0.0 to 5.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1448
+* Bump golang from `b03f3ba` to `d0902ba` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1445
+* Bump golangci/golangci-lint-action from 5.0.0 to 5.1.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1451
+* Bump distroless/base-debian12 from `611d30d` to `d8d01e2` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1450
+* Bump google.golang.org/protobuf from 1.33.1-0.20240408130810-98873a205002 to 1.34.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1453
+* Bump actions/setup-go from 5.0.0 to 5.0.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1455
+* Bump golang.org/x/net from 0.24.0 to 0.25.0 and golang.org/x/crypto from v0.22.0 to v0.23.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1457
+* Bump google.golang.org/protobuf from 1.34.0 to 1.34.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1458
+* Bump distroless/base-debian12 from `d8d01e2` to `786007f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1461
+* Bump golangci/golangci-lint-action from 5.1.0 to 5.3.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1460
+* Bump `go-version-input` to 1.21.10 in govulncheck.yml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1472
+* Bump golangci/golangci-lint-action from 5.3.0 to 6.0.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1473
+* Bump actions/checkout from 4.1.4 to 4.1.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1469
+* Bump github.com/go-sql-driver/mysql from 1.7.1 to 1.8.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1465
+* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1466
+* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1463
+* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1470
+* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1467
+* Bump github/codeql-action from 3.25.3 to 3.25.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1474
+* Bump github.com/prometheus/client_golang from 1.19.0 to 1.19.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1475
+* Bump ossf/scorecard-action from 2.3.1 to 2.3.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1476
+* Bump github/codeql-action from 3.25.4 to 3.25.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1478
+* Bump golang from `6d71b7c` to `ef27a3c` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1480
+* Bump golang from `6d71b7c` to `ef27a3c` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1481
+* Bump golang from `6d71b7c` to `ef27a3c` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1479
+* Bump golang from `6d71b7c` to `ef27a3c` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1483
+* Bump golang from `ef27a3c` to `5c56bd4` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1484
+* Bump golang from `ef27a3c` to `5c56bd4` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1485
+* Bump golang from `ef27a3c` to `5c56bd4` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1486
+* Bump actions/checkout from 4.1.5 to 4.1.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1487
+* Bump golang from `ef27a3c` to `5c56bd4` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1488
+* Bump github/codeql-action from 3.25.5 to 3.25.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1490
+* Bump alpine from `c5b1261` to `58d02b4` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1491
+* Bump alpine from `58d02b4` to `77726ef` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1493
+
+## v1.1.8
+
+* Recommended Go version for development: 1.21
+  * Using a different version can lead to presubmits failing due to unexpected diffs.
+
+### Add support for AIX
+
+* crypto/x509: add AIX operating system by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1277
+
+### Monitoring
+
+* Distribution metric to monitor the start of get-entries requests by @phbnf in https://github.com/google/certificate-transparency-go/pull/1364
+
+### Fixes
+
+* Use the appropriate HTTP response code for backend timeouts by @robstradling in https://github.com/google/certificate-transparency-go/pull/1313
+
+### Misc
+
+* Move golangci-lint from Cloud Build to GitHub Action by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1230
+* Set golangci-lint GH action timeout to 5m by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1231
+* Added Slack channel details by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1246
+* Improve fuzzing by @AdamKorcz in https://github.com/google/certificate-transparency-go/pull/1345
+
+### Dependency update
+
+* Bump golang from `20f9ab5` to `5ee1296` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1216
+* Bump golang from `20f9ab5` to `5ee1296` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1217
+* Bump golang from `20f9ab5` to `5ee1296` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1218
+* Bump k8s.io/klog/v2 from 2.100.1 to 2.110.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1219
+* Bump golang from `20f9ab5` to `5ee1296` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1220
+* Bump golang from `5ee1296` to `5bafbbb` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1221
+* Bump golang from `5ee1296` to `5bafbbb` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1222
+* Bump golang from `5ee1296` to `5bafbbb` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1223
+* Bump golang from `5ee1296` to `5bafbbb` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1224
+* Update the minimal image to gcr.io/distroless/base-debian12 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1148
+* Bump jq from 1.6 to 1.7 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1225
+* Bump github.com/spf13/cobra from 1.7.0 to 1.8.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1226
+* Bump golang.org/x/time from 0.3.0 to 0.4.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1227
+* Bump github.com/mattn/go-sqlite3 from 1.14.17 to 1.14.18 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1228
+* Bump github.com/gorilla/mux from 1.8.0 to 1.8.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1229
+* Bump golang from 1.21.3-bookworm to 1.21.4-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1232
+* Bump golang from 1.21.3-bookworm to 1.21.4-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1233
+* Bump golang from 1.21.3-bookworm to 1.21.4-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1234
+* Bump golang from 1.21.3-bookworm to 1.21.4-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1235
+* Bump go-version-input from 1.20.10 to 1.20.11 in govulncheck.yml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1238
+* Bump golang.org/x/net from 0.17.0 to 0.18.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1236
+* Bump github/codeql-action from 2.22.5 to 2.22.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1240
+* Bump github/codeql-action from 2.22.6 to 2.22.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1241
+* Bump golang from `85aacbe` to `dadce81` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1243
+* Bump golang from `85aacbe` to `dadce81` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1242
+* Bump golang from `85aacbe` to `dadce81` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1244
+* Bump golang from `85aacbe` to `dadce81` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1245
+* Bump golang from `dadce81` to `52362e2` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1247
+* Bump golang from `dadce81` to `52362e2` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1248
+* Bump golang from `dadce81` to `52362e2` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1249
+* Bump golang from `dadce81` to `52362e2` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1250
+* Bump github/codeql-action from 2.22.7 to 2.22.8 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1251
+* Bump golang.org/x/net from 0.18.0 to 0.19.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1252
+* Bump golang.org/x/time from 0.4.0 to 0.5.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1254
+* Bump alpine from `eece025` to `34871e7` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1256
+* Bump alpine from `eece025` to `34871e7` in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1257
+* Bump go-version-input from 1.20.11 to 1.20.12 in govulncheck.yml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1264
+* Bump actions/setup-go from 4.1.0 to 5.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1261
+* Bump golang from 1.21.4-bookworm to 1.21.5-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1259
+* Bump golang from 1.21.4-bookworm to 1.21.5-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1263
+* Bump golang from 1.21.4-bookworm to 1.21.5-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1262
+* Bump golang from 1.21.4-bookworm to 1.21.5-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1260
+* Bump go.etcd.io/etcd/v3 from 3.5.10 to 3.5.11 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1266
+* Bump github/codeql-action from 2.22.8 to 2.22.9 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1269
+* Bump alpine from `34871e7` to `51b6726` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1270
+* Bump alpine from 3.18 to 3.19 in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1271
+* Bump golang from `a6b787c` to `2d3b13c` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1272
+* Bump golang from `a6b787c` to `2d3b13c` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1273
+* Bump golang from `a6b787c` to `2d3b13c` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1274
+* Bump golang from `a6b787c` to `2d3b13c` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1275
+* Bump github/codeql-action from 2.22.9 to 2.22.10 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1278
+* Bump google.golang.org/grpc from 1.59.0 to 1.60.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1279
+* Bump github/codeql-action from 2.22.10 to 3.22.11 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1280
+* Bump distroless/base-debian12 from `1dfdb5e` to `8a0bb63` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1281
+* Bump github.com/google/trillian from 1.5.3 to 1.5.4-0.20240110091238-00ca9abe023d by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1297
+* Bump actions/upload-artifact from 3.1.3 to 4.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1282
+* Bump github/codeql-action from 3.22.11 to 3.23.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1295
+* Bump github.com/mattn/go-sqlite3 from 1.14.18 to 1.14.19 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1283
+* Bump golang from 1.21.5-bookworm to 1.21.6-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1300
+* Bump distroless/base-debian12 from `8a0bb63` to `0a93daa` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1284
+* Bump golang from 1.21.5-bookworm to 1.21.6-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1299
+* Bump golang from 1.21.5-bookworm to 1.21.6-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1298
+* Bump golang from 1.21.5-bookworm to 1.21.6-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1301
+* Bump golang from `688ad7f` to `1e8ea75` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1306
+* Bump golang from `688ad7f` to `1e8ea75` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1305
+* Use trillian release instead of pinned commit by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1304
+* Bump actions/upload-artifact from 4.0.0 to 4.1.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1310
+* Bump golang from `1e8ea75` to `cbee5d2` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1312
+* Bump golang from `688ad7f` to `cbee5d2` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1308
+* Bump golang from `1e8ea75` to `cbee5d2` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1311
+* Bump golang.org/x/net from 0.19.0 to 0.20.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1302
+* Bump golang from `b651ed8` to `cbee5d2` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1309
+* Bump golang from `cbee5d2` to `c4b696f` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1314
+* Bump golang from `cbee5d2` to `c4b696f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1315
+* Bump github/codeql-action from 3.23.0 to 3.23.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1317
+* Bump golang from `cbee5d2` to `c4b696f` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1316
+* Bump golang from `cbee5d2` to `c4b696f` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1318
+* Bump k8s.io/klog/v2 from 2.120.0 to 2.120.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1319
+* Bump actions/upload-artifact from 4.1.0 to 4.2.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1320
+* Bump actions/upload-artifact from 4.2.0 to 4.3.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1321
+* Bump golang from `c4b696f` to `d8c365d` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1326
+* Bump golang from `c4b696f` to `d8c365d` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1323
+* Bump google.golang.org/grpc from 1.60.1 to 1.61.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1324
+* Bump golang from `c4b696f` to `d8c365d` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1322
+* Bump golang from `c4b696f` to `d8c365d` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1325
+* Bump github.com/mattn/go-sqlite3 from 1.14.19 to 1.14.20 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1327
+* Bump github/codeql-action from 3.23.1 to 3.23.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1328
+* Bump alpine from `51b6726` to `c5b1261` in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1330
+* Bump alpine from `51b6726` to `c5b1261` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1329
+* Bump go.etcd.io/etcd/v3 from 3.5.11 to 3.5.12 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1332
+* Bump github.com/mattn/go-sqlite3 from 1.14.20 to 1.14.21 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1333
+* Bump golang from `d8c365d` to `69bfed3` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1335
+* Bump golang from `d8c365d` to `69bfed3` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1338
+* Bump golang from `d8c365d` to `69bfed3` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1337
+* Bump golang from `d8c365d` to `69bfed3` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1336
+* Bump golang from `69bfed3` to `3efef61` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1339
+* Bump github.com/mattn/go-sqlite3 from 1.14.21 to 1.14.22 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1344
+* Bump golang from `69bfed3` to `3efef61` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1341
+* Bump golang from `69bfed3` to `3efef61` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1343
+* Bump distroless/base-debian12 from `0a93daa` to `f47fa3d` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1340
+* Bump golang from `69bfed3` to `3efef61` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1342
+* Bump github/codeql-action from 3.23.2 to 3.24.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1346
+* Bump actions/upload-artifact from 4.3.0 to 4.3.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1347
+* Bump golang from 1.21.6-bookworm to 1.22.0-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1350
+* Bump golang from 1.21.6-bookworm to 1.22.0-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1348
+* Bump golang from 1.21.6-bookworm to 1.22.0-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1349
+* Bump golang from 1.21.6-bookworm to 1.22.0-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1351
+* Bump golang.org/x/crypto from 0.18.0 to 0.19.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1353
+* Bump golangci/golangci-lint-action from 3.7.0 to 4.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1354
+* Bump golang.org/x/net from 0.20.0 to 0.21.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1352
+* Bump distroless/base-debian12 from `f47fa3d` to `2102ce1` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1355
+* Bump github/codeql-action from 3.24.0 to 3.24.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1357
+* Bump golang from `874c267` to `5a3e169` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1356
+* Bump golang from `874c267` to `5a3e169` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1358
+* Bump golang from `874c267` to `5a3e169` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1359
+* Bump golang from `874c267` to `5a3e169` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1360
+* Bump github/codeql-action from 3.24.1 to 3.24.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1366
+* Bump golang from `5a3e169` to `925fe3f` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1363
+* Bump google.golang.org/grpc from 1.61.0 to 1.61.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1362
+* Bump golang from `5a3e169` to `925fe3f` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1365
+* Bump golang from `5a3e169` to `925fe3f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1361
+* Bump golang from `5a3e169` to `925fe3f` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1367
+* Bump golang/govulncheck-action from 1.0.1 to 1.0.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1368
+* Bump github/codeql-action from 3.24.3 to 3.24.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1371
+* Bump google.golang.org/grpc from 1.61.1 to 1.62.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1369
+* Bump distroless/base-debian12 from `2102ce1` to `5eae9ef` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1372
+* Bump distroless/base-debian12 from `5eae9ef` to `f9b0e86` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1375
+* Bump golang.org/x/crypto from 0.19.0 to 0.20.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1374
+* Bump github.com/prometheus/client_golang from 1.18.0 to 1.19.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1373
+* Bump github/codeql-action from 3.24.5 to 3.24.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1377
+* Bump distroless/base-debian12 from `f9b0e86` to `5eae9ef` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1376
+* Bump golang.org/x/net from 0.21.0 to 0.22.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1378
+* Bump Go from 1.20 to 1.21 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1386
+* Bump google.golang.org/grpc from 1.62.0 to 1.62.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1380
+* Bump golang from 1.22.0-bookworm to 1.22.1-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1382
+* Bump golang from 1.22.0-bookworm to 1.22.1-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1385
+* Bump golang from 1.22.0-bookworm to 1.22.1-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1384
+* Bump golang from 1.22.0-bookworm to 1.22.1-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1383
+
+## v1.1.7
+
+* Recommended Go version for development: 1.20
+  * This is the version used by the Cloud Build presubmits. Using a different version can lead to presubmits failing due to unexpected diffs.
+
+* Bump golangci-lint from 1.51.1 to 1.55.1 (developers should update to this version).
+
+### Add support for WASI port
+
+* Add build tags for wasip1 GOOS by @flavio in https://github.com/google/certificate-transparency-go/pull/1089
+
+### Add support for IBM Z operating system z/OS
+
+* Add build tags for zOS by @onlywork1984 in https://github.com/google/certificate-transparency-go/pull/1088
+
+### Log List
+
+* Add support for "is_all_logs" field in loglist3 by @phbnf in https://github.com/google/certificate-transparency-go/pull/1095
+
+### Documentation
+
+* Improve Dockerized Test Deployment documentation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1179
+
+### Misc
+
+* Escape forward slashes in certificate Subject names when used as user quota id strings by @robstradling in https://github.com/google/certificate-transparency-go/pull/1059
+* Search whole chain looking for issuer match by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1112
+* Use proper check per @AGWA instead of buggy check introduced in #1112 by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1114
+* Build the ctfe/ct_server binary without depending on glibc by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1119
+* Migrate CTFE Ingress manifest to support GKE version 1.23 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1086
+* Remove Dependabot ignore configuration by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1097
+* Add "github-actions" and "docker" Dependabot config by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1101
+* Add top level permission in CodeQL workflow by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1102
+* Pin Docker image dependencies by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1110
+* Remove GO111MODULE from Dockerfile and Cloud Build yaml files by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1113
+* Add docker Dependabot config by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1126
+* Export is_mirror = 0.0 for non mirror instead of nothing by @phbnf in https://github.com/google/certificate-transparency-go/pull/1133
+* Add govulncheck GitHub action by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1145
+* Spelling by @jsoref in https://github.com/google/certificate-transparency-go/pull/1144
+
+### Dependency update
+
+* Bump Go from 1.19 to 1.20 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1146
+* Bump golangci-lint from 1.51.1 to 1.55.1 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1214
+* Bump go.etcd.io/etcd/v3 from 3.5.8 to 3.5.9 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1083
+* Bump golang.org/x/crypto from 0.8.0 to 0.9.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/108
+* Bump github.com/mattn/go-sqlite3 from 1.14.16 to 1.14.17 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1092
+* Bump golang.org/x/net from 0.10.0 to 0.11.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1094
+* Bump github.com/prometheus/client_golang from 1.15.1 to 1.16.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1098
+* Bump google.golang.org/protobuf from 1.30.0 to 1.31.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1099
+* Bump golang.org/x/net from 0.11.0 to 0.12.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1108
+* Bump actions/checkout from 3.1.0 to 3.5.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1103
+* Bump github/codeql-action from 2.1.27 to 2.20.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1104
+* Bump ossf/scorecard-action from 2.0.6 to 2.2.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1105
+* Bump actions/upload-artifact from 3.1.0 to 3.1.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1106
+* Bump github/codeql-action from 2.20.3 to 2.20.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1115
+* Bump github/codeql-action from 2.20.4 to 2.21.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1117
+* Bump golang.org/x/net from 0.12.0 to 0.14.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1124
+* Bump github/codeql-action from 2.21.0 to 2.21.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1121
+* Bump github/codeql-action from 2.21.2 to 2.21.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1125
+* Bump golang from `fd9306e` to `eb3f9ac` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1127
+* Bump alpine from 3.8 to 3.18 in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1129
+* Bump golang from `fd9306e` to `eb3f9ac` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1128
+* Bump alpine from `82d1e9d` to `7144f7b` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1130
+* Bump golang from `fd9306e` to `eb3f9ac` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1131
+* Bump golang from 1.19-alpine to 1.21-alpine in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1132
+* Bump actions/checkout from 3.5.3 to 3.6.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1134
+* Bump github/codeql-action from 2.21.4 to 2.21.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1135
+* Bump distroless/base from `73deaaf` to `46c5b9b` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1136
+* Bump actions/checkout from 3.6.0 to 4.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1137
+* Bump golang.org/x/net from 0.14.0 to 0.15.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1139
+* Bump github.com/rs/cors from 1.9.0 to 1.10.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1140
+* Bump actions/upload-artifact from 3.1.2 to 3.1.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1141
+* Bump golang from `445f340` to `96634e5` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1142
+* Bump github/codeql-action from 2.21.5 to 2.21.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1149
+* Bump Docker golang base images to 1.21.1 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1147
+* Bump github/codeql-action from 2.21.6 to 2.21.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1150
+* Bump github/codeql-action from 2.21.7 to 2.21.8 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1152
+* Bump golang from `d3114db` to `a0b3bc4` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1155
+* Bump golang from `d3114db` to `a0b3bc4` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1157
+* Bump golang from `d3114db` to `a0b3bc4` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1156
+* Bump golang from `d3114db` to `a0b3bc4` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1158
+* Bump golang from `e06b3a4` to `114b9cc` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1159
+* Bump golang from `a0b3bc4` to `114b9cc` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1160
+* Bump golang from `a0b3bc4` to `114b9cc` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1161
+* Bump actions/checkout from 4.0.0 to 4.1.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1162
+* Bump golang from `114b9cc` to `9c7ea4a` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1163
+* Bump golang from `114b9cc` to `9c7ea4a` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1166
+* Bump golang from `114b9cc` to `9c7ea4a` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1165
+* Bump golang from `114b9cc` to `9c7ea4a` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1164
+* Bump github/codeql-action from 2.21.8 to 2.21.9 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1169
+* Bump golang from `9c7ea4a` to `61f84bc` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1168
+* Bump github.com/prometheus/client_golang from 1.16.0 to 1.17.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1172
+* Bump golang from `9c7ea4a` to `61f84bc` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1170
+* Bump github.com/rs/cors from 1.10.0 to 1.10.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1176
+* Bump alpine from `7144f7b` to `eece025` in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1174
+* Bump alpine from `7144f7b` to `eece025` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1175
+* Bump golang from `9c7ea4a` to `61f84bc` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1171
+* Bump golang from `9c7ea4a` to `61f84bc` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1173
+* Bump distroless/base from `46c5b9b` to `a35b652` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1177
+* Bump golang.org/x/crypto from 0.13.0 to 0.14.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1178
+* Bump github/codeql-action from 2.21.9 to 2.22.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1180
+* Bump golang from 1.21.1-bookworm to 1.21.2-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1181
+* Bump golang.org/x/net from 0.15.0 to 0.16.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1184
+* Bump golang from 1.21.1-bookworm to 1.21.2-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1182
+* Bump golang from 1.21.1-bookworm to 1.21.2-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1185
+* Bump golang from 1.21.1-bookworm to 1.21.2-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1183
+* Bump github/codeql-action from 2.22.0 to 2.22.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1186
+* Bump distroless/base from `a35b652` to `b31a6e0` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1188
+* Bump ossf/scorecard-action from 2.2.0 to 2.3.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1187
+* Bump github.com/google/go-cmp from 0.5.9 to 0.6.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1189
+* Bump golang.org/x/net from 0.16.0 to 0.17.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1190
+* Bump go-version-input from 1.20.8 to 1.20.10 in govulncheck by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1195
+* Bump golang from 1.21.2-bookworm to 1.21.3-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1193
+* Bump golang from 1.21.2-bookworm to 1.21.3-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1191
+* Bump golang from 1.21.2-bookworm to 1.21.3-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1194
+* Bump golang from 1.21.2-bookworm to 1.21.3-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1192
+* Bump golang from `a94b089` to `8f9a1ec` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1196
+* Bump github/codeql-action from 2.22.1 to 2.22.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1197
+* Bump golang from `a94b089` to `5cc7ddc` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1200
+* Bump golang from `a94b089` to `5cc7ddc` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1199
+* Bump github/codeql-action from 2.22.2 to 2.22.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1202
+* Bump golang from `5cc7ddc` to `20f9ab5` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1203
+* Bump golang from `a94b089` to `20f9ab5` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1198
+* Bump golang from `8f9a1ec` to `20f9ab5` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1201
+* Bump actions/checkout from 4.1.0 to 4.1.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1204
+* Bump github/codeql-action from 2.22.3 to 2.22.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1206
+* Bump ossf/scorecard-action from 2.3.0 to 2.3.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1207
+* Bump github/codeql-action from 2.22.4 to 2.22.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1209
+* Bump multiple Go module dependencies by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1213
+
+## v1.1.6
+
+### Dependency update
+
+* Bump Trillian to v1.5.2
+* Bump Prometheus to v0.43.1
+
+## v1.1.5
+
+### Public/Private Key Consistency
+
+ * #1044: If a public key has been configured for a log, check that it is consistent with the private key.
+ * #1046: Ensure that no two logs in the CTFE configuration use the same private key.
+
+### Cleanup
+
+ * Remove v2 log list package files.
+
+### Misc
+
+ * Updated golangci-lint to v1.51.1 (developers should update to this version).
+ * Bump Go version from 1.17 to 1.19.
+
+## v1.1.4
+
+[Published 2022-10-21](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.4)
+
+### Cleanup
+
+ * Remove log list v1 package and its dependencies.
+
+### Migrillian
+
+ * #960: Skip consistency check when root is size zero.
+
+### Misc
+
+ * Update Trillian to [0a389c4](https://github.com/google/trillian/commit/0a389c4bb8d97fb3be8f55d7e5b428cf4304986f)
+ * Migrate loglist dependency from v1 to v3 in ctclient cmd.
+ * Migrate loglist dependency from v1 to v3 in ctutil/loginfo.go
+ * Migrate loglist dependency from v1 to v3 in ctutil/sctscan.go
+ * Migrate loglist dependency from v1 to v3 in trillian/integration/ct_hammer/main.go
+ * Downgrade 429 errors to verbosity 2
+
+## v1.1.3
+
+[Published 2022-05-14](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.3)
+
+### Integration
+
+ * Breaking change to API for `integration.HammerCTLog`:
+   * Added `ctx` as the first argument; the loop now terminates if the context is cancelled.
+
+### JSONClient
+
+ * PostAndParseWithRetry now does backoff-and-retry upon receiving HTTP 429.
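+
+The retry behaviour can be pictured with a minimal, self-contained sketch (illustrative only — this is not the jsonclient implementation, just the backoff-and-retry pattern it describes, with an assumed initial delay and cap):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// postWithRetry retries fn with exponential backoff for as long as it
+// reports http.StatusTooManyRequests, giving up when ctx is cancelled.
+func postWithRetry(ctx context.Context, fn func() (int, error)) error {
+	backoff := 100 * time.Millisecond // assumed initial delay
+	for {
+		status, err := fn()
+		if err != nil {
+			return err
+		}
+		if status != http.StatusTooManyRequests {
+			return nil // success (non-429 statuses handled elsewhere)
+		}
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-time.After(backoff):
+		}
+		if backoff < 30*time.Second { // assumed cap on the delay
+			backoff *= 2
+		}
+	}
+}
+
+func main() {
+	attempts := 0
+	err := postWithRetry(context.Background(), func() (int, error) {
+		attempts++
+		if attempts < 3 {
+			return http.StatusTooManyRequests, nil // simulate rate limiting
+		}
+		return http.StatusOK, nil
+	})
+	fmt.Println(attempts, err) // 3 <nil>
+}
+```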
+ * Migrate loglist dependency from v1 to v3 in ctutil/loginfo.go
+ * Migrate loglist dependency from v1 to v3 in ctutil/sctscan.go
+ * Migrate loglist dependency from v1 to v3 in trillian/integration/ct_hammer/main.go
+ * Downgrade 429 errors to verbosity 2
+
+## v1.1.3
+
+[Published 2022-05-14](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.3)
+
+### Integration
+
+ * Breaking change to the API for `integration.HammerCTLog`:
+    * Added `ctx` as the first argument; the loop now terminates if the context is cancelled.
+
+### JSONClient
+
+ * PostAndParseWithRetry now does backoff-and-retry upon receiving HTTP 429.
+
+### Cleanup
+
+ * The deprecated `WithBalancerName` gRPC option has been removed in favour of
+   the recommended replacement API.
+ * `ctfe.PEMCertPool` type has been moved to `x509util.PEMCertPool` to reduce
+   dependencies (#903).
+
+### Misc
+
+ * Updated golangci-lint to v1.46.1 (developers should update to this version).
+ * Updated `google.golang.org/grpc` to v1.46.0.
+ * `ctclient` tool now uses Cobra for a better CLI experience (#901).
+ * #800: Remove dependency on `ratelimit`.
+ * #927: Add read-only mode to CTFE config.
+
+## v1.1.2
+
+[Published 2021-09-21](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.2)
+
+### CTFE
+
+ * Removed the `-by_range` flag.
+
+### Updated dependencies
+
+ * Trillian from v1.3.11 to v1.4.0
+ * protobuf to v2
+
+## v1.1.1
+
+[Published 2020-10-06](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.1)
+
+### Tools
+
+#### CT Hammer
+
+Added a flag (--strict_sth_consistency_size) which, when set to true, enforces
+the current behaviour of only requesting consistency proofs between tree sizes
+for which the hammer has seen valid STHs.
+When this flag is set to false and no two usable STHs are available, the hammer
+will attempt to request a consistency proof between the latest STH it has seen
+and a random smaller (but > 0) tree size.
+
+### CTFE
+
+#### Caching
+
+The CTFE now includes a Cache-Control header in responses containing purely
+immutable data, e.g. those for get-entries and get-proof-by-hash. This allows
+clients and proxies to cache these responses for up to 24 hours.
+
+#### EKU Filtering
+
+> :warning: **It is not yet recommended to enable this option in a production CT Log!**
+
+CTFE now supports filtering logging submissions by leaf certificate EKU.
+This is enabled by adding an extKeyUsage list to a log's stanza in the
+config file.
+
+The format is a list of strings corresponding to the supported golang x509 EKUs:
+ |Config string | Extended Key Usage |
+ |----------------------------|----------------------------------------|
+ |`Any` | ExtKeyUsageAny |
+ |`ServerAuth` | ExtKeyUsageServerAuth |
+ |`ClientAuth` | ExtKeyUsageClientAuth |
+ |`CodeSigning` | ExtKeyUsageCodeSigning |
+ |`EmailProtection` | ExtKeyUsageEmailProtection |
+ |`IPSECEndSystem` | ExtKeyUsageIPSECEndSystem |
+ |`IPSECTunnel` | ExtKeyUsageIPSECTunnel |
+ |`IPSECUser` | ExtKeyUsageIPSECUser |
+ |`TimeStamping` | ExtKeyUsageTimeStamping |
+ |`OCSPSigning` | ExtKeyUsageOCSPSigning |
+ |`MicrosoftServerGatedCrypto`| ExtKeyUsageMicrosoftServerGatedCrypto |
+ |`NetscapeServerGatedCrypto` | ExtKeyUsageNetscapeServerGatedCrypto |
+
+When an extKeyUsage list is specified, the CT Log will reject logging
+submissions for leaf certificates that do not contain an EKU present in this
+list.
+
+When enabled, EKU filtering is only performed at the leaf level (i.e. there is
+no 'nested' EKU filtering performed).
+
+If no list is specified, or the list contains an `Any` entry, no EKU
+filtering will be performed.
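+
+As an illustration only, the rule above amounts to mapping the configured
+strings onto the Go x509 constants from the table and accepting any leaf that
+carries at least one of them. The sketch below uses hypothetical names and the
+standard library's `crypto/x509`; it is not the CTFE implementation.
+
+```go
+// Package ekufilter is an illustrative sketch of the EKU filtering rule
+// described in this changelog entry; the real code lives in trillian/ctfe.
+package ekufilter
+
+import "crypto/x509"
+
+// ekuByConfigString maps config strings to EKUs (a subset of the table above).
+var ekuByConfigString = map[string]x509.ExtKeyUsage{
+	"Any":             x509.ExtKeyUsageAny,
+	"ServerAuth":      x509.ExtKeyUsageServerAuth,
+	"ClientAuth":      x509.ExtKeyUsageClientAuth,
+	"EmailProtection": x509.ExtKeyUsageEmailProtection,
+}
+
+// LeafAllowed reports whether a leaf passes the filter. An empty list, or a
+// list containing "Any", disables filtering entirely.
+func LeafAllowed(leaf *x509.Certificate, configured []string) bool {
+	if len(configured) == 0 {
+		return true
+	}
+	allowed := make(map[x509.ExtKeyUsage]bool)
+	for _, s := range configured {
+		if eku, ok := ekuByConfigString[s]; ok {
+			if eku == x509.ExtKeyUsageAny {
+				return true
+			}
+			allowed[eku] = true
+		}
+	}
+	// Leaf-level check only: no "nested" EKU filtering is performed.
+	for _, eku := range leaf.ExtKeyUsage {
+		if allowed[eku] {
+			return true
+		}
+	}
+	return false
+}
+```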
+
+#### GetEntries
+
+Calls to `get-entries` that request at least the maximum permitted number of
+entries, but whose `start` parameter does not fall on a multiple of that
+maximum, will have their responses truncated so that subsequent requests
+align with this boundary.
+This is intended to coerce all callers of `get-entries` into using the same
+`start` and `end` parameters, and thereby increase the cacheability of
+these requests (see the sketch after the diagram below).
+
+e.g.:
+
+```
+Old behaviour:
+             1         2         3
+             0         0         0
+Entries>-----|---------|---------|----...
+Client A -------|---------|----------|...
+Client B --|--------|---------|-------...
+           ^        ^         ^
+           `--------`---------`---- requests
+
+With coercion (max batch = 10 entries):
+             1         2         3
+             0         0         0
+Entries>-----|---------|---------|----...
+Client A ----X---------|---------|...
+Client B --|-X---------|---------|-------...
+             ^
+             `-- Requests truncated
+```
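+
+A minimal sketch of the truncation arithmetic (the function name and exact
+rule here are illustrative assumptions, not the CTFE implementation):
+
+```go
+package main
+
+import "fmt"
+
+// truncateEnd clips a get-entries range so that the next request will start
+// on a multiple of maxBatch; small or already-aligned requests are untouched.
+func truncateEnd(start, end, maxBatch int64) int64 {
+	if start%maxBatch == 0 || end-start+1 < maxBatch {
+		return end
+	}
+	// Stop just before the next maxBatch boundary after start.
+	if aligned := (start/maxBatch+1)*maxBatch - 1; aligned < end {
+		return aligned
+	}
+	return end
+}
+
+func main() {
+	// With a max batch of 10, a request for [3, 30] is truncated to [3, 9],
+	// so the client's next request starts at the aligned offset 10.
+	fmt.Println(truncateEnd(3, 30, 10)) // prints: 9
+}
+```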
+
+This behaviour can be disabled by setting the `--align_getentries`
+flag to false.
+
+#### Flags
+
+The `ct_server` binary changed the default of these flags:
+
+- `by_range` - Now defaults to `true`
+
+The `ct_server` binary added the following flags:
+
+- `align_getentries` - See the GetEntries section above for details
+
+Added a `backend` flag to `migrillian`, which now replaces the deprecated
+"backend" feature of Migrillian configs.
+
+#### FixedBackendResolver Replaced
+
+This was previously used in situations where a comma separated list of
+backends was provided in the `rpcBackend` flag rather than a single value.
+
+It has been replaced by equivalent functionality using a newer gRPC API.
+However, this support was only intended for use in integration tests. In
+production we recommend the use of etcd or a gRPC load balancer.
+
+### LogList
+
+Log list tools have been updated to use the correct v2 URL (previously they
+used v2_beta).
+
+### Libraries
+
+#### x509 fork
+
+Merged upstream Go 1.13 and Go 1.14 changes (with the exception of
+https://github.com/golang/go/commit/14521198679e, so that old certificates
+using a malformed root can still be logged).
+
+#### asn1 fork
+
+Merged upstream Go 1.14 changes.
+
+#### ctutil
+
+Added VerifySCTWithVerifier() to verify SCTs using a given ct.SignatureVerifier.
+
+### Configuration Files
+
+Configuration files that previously had to be text-encoded Protobuf messages
+can now alternatively be binary-encoded.
+
+### JSONClient
+
+- `PostAndParseWithRetry` error logging now includes the log URI in messages.
+
+### Minimal Gossip Example
+
+All the code for this, except for the x509ext package, has been moved over
+to the [trillian-examples](https://github.com/google/trillian-examples) repository.
+
+This keeps the code together and removes a circular dependency between the
+two repositories. The package layout and structure remain the same, so
+updating should just mean changing any relevant import paths.
+
+### Dependencies
+
+A circular dependency on the [monologue](https://github.com/google/monologue) repository has been removed.
+
+A circular dependency on the [trillian-examples](https://github.com/google/trillian-examples) repository has been removed.
+
+The version of trillian in use has been updated to 1.3.11. This has required
+various other dependency updates including gRPC and protobuf. This code now
+uses the v2 proto API. The Travis tests now expect the 3.11.4 version of
+protoc.
+
+The version of etcd in use has been switched to the one from `go.etcd.io`.
+
+Most of the above changes are to align versions more closely with the ones
+used in the trillian repository.
+
+## v1.1.0
+
+Published 2019-11-14 15:00:00 +0000 UTC
+
+### CTFE
+
+The `reject_expired` and `reject_unexpired` configuration fields for the CTFE
+have been changed so that their behaviour reflects their name:
+
+- `reject_expired` only rejects expired certificates (i.e. it now allows
+  not-yet-valid certificates).
+- `reject_unexpired` only allows expired certificates (i.e. it now rejects
+  not-yet-valid certificates).
+
+A `reject_extensions` configuration field for the CTFE was added; this allows
+submissions to be rejected if they contain an extension with any of the
+specified OIDs.
+
+A `frozen_sth` configuration field for the CTFE was added. This STH will be
+served permanently. It must be signed by the log's private key.
+
+A `/healthz` URL has been added which responds with HTTP 200 OK and the string
+"ok" when the server is up.
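+
+For illustration, an endpoint of this shape is a few lines with Go's standard
+`net/http` package (a sketch with an arbitrary port, not the actual CTFE wiring):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+)
+
+func main() {
+	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
+		// The first write implies an HTTP 200 OK status.
+		fmt.Fprint(w, "ok")
+	})
+	log.Fatal(http.ListenAndServe(":8080", nil))
+}
+```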
+
+#### Flags
+
+The `ct_server` binary has these new flags:
+
+- `mask_internal_errors` - Removes error strings from HTTP 500 responses
+  (Internal Server Error)
+
+Removed default values for `--metrics_endpoint` and `--log_rpc_server` flags.
+This makes it easier to get the documented "unset" behaviour.
+
+#### Metrics
+
+The CTFE exports these new metrics:
+
+- `is_mirror` - set to 1 for mirror logs (copies of logs hosted elsewhere)
+- `frozen_sth_timestamp` - time of the frozen Signed Tree Head in milliseconds
+  since the epoch
+
+#### Kubernetes
+
+Updated prometheus-to-sd to v0.5.2.
+
+A dedicated node pool is no longer required by the Kubernetes manifests.
+
+### Log Lists
+
+A new package has been created for parsing, searching and creating JSON log
+lists compatible with the
+[v2 schema](http://www.gstatic.com/ct/log_list/v2_beta/log_list_schema.json):
+`github.com/google/certificate-transparency-go/loglist2`.
+
+### Docker Images
+
+Our Docker images have been updated to use Go 1.11 and
+[Distroless base images](https://github.com/GoogleContainerTools/distroless).
+
+The CTFE Docker image now sets `ENTRYPOINT`.
+
+### Utilities / Libraries
+
+#### jsonclient
+
+The `jsonclient` package now copes with empty HTTP responses. The user-agent
+header it sends can now be specified.
+
+#### x509 and asn1 forks
+
+Merged upstream changes from Go 1.12 into the `asn1` and `x509` packages.
+
+Added a "lax" tag to `asn1` that applies recursively and makes some checks more
+relaxed:
+
+- parsePrintableString() copes with invalid PrintableString contents, e.g. use
+  of tagPrintableString when the string data is really ISO8859-1.
+- checkInteger() allows integers that are not minimally encoded (and so are
+  not correct DER).
+- OIDs are allowed to be empty.
+
+The following `x509` functions will now return `x509.NonFatalErrors` if ASN.1
+parsing fails in strict mode but succeeds in lax mode. Previously, they only
+attempted strict mode parsing.
+
+- `x509.ParseTBSCertificate()`
+- `x509.ParseCertificate()`
+- `x509.ParseCertificates()`
+
+The `x509` package will now treat a negative RSA modulus as a non-fatal error.
+
+The `x509` package now supports RSAES-OAEP and Ed25519 keys.
+
+#### ctclient
+
+The `ctclient` tool now defaults to using
+[all_logs_list.json](https://www.gstatic.com/ct/log_list/all_logs_list.json)
+instead of [log_list.json](https://www.gstatic.com/ct/log_list/log_list.json).
+This can be overridden using the `--log_list` flag.
+
+It can now perform inclusion checks on pre-certificates.
+
+It has these new commands:
+
+- `bisect` - Finds a log entry given a timestamp.
+
+It has these new flags:
+
+- `--chain` - Displays the entire certificate chain
+- `--dns_server` - The DNS server to direct queries to (system resolver by
+  default)
+- `--skip_https_verify` - Skips verification of the HTTPS connection
+- `--timestamp` - Timestamp to use for `bisect` and `inclusion` commands (for
+  `inclusion`, only if --leaf_hash is not used)
+
+It now accepts hex or base64-encoded strings for the `--tree_hash`,
+`--prev_hash` and `--leaf_hash` flags.
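+
+One way to accept both encodings is to try hex first and fall back to base64,
+so an ambiguous input is read as hex. The helper below is a sketch under that
+assumption, not the actual `ctclient` flag handling:
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+)
+
+// decodeHashFlag accepts a hash given either as hex or as base64.
+func decodeHashFlag(s string) ([]byte, error) {
+	if b, err := hex.DecodeString(s); err == nil {
+		return b, nil
+	}
+	return base64.StdEncoding.DecodeString(s)
+}
+
+func main() {
+	hexIn := "4ee95f5e8b53cf5f46372ffcf155101175930e733fff75a88b34fb5bf7eb9fc2"
+	b64In := "TulfXotTz19GNy/88VUQEXWTDnM//3WoizT7W/fryfI="
+	for _, in := range []string{hexIn, b64In} {
+		b, err := decodeHashFlag(in)
+		fmt.Println(len(b), err) // each input decodes to a 32-byte hash
+	}
+}
+```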
+
+#### certcheck
+
+The `certcheck` tool has these new flags:
+
+- `--check_time` - Check current validity of certificate (replaces
+  `--timecheck`)
+- `--check_name` - Check validity of certificate name
+- `--check_eku` - Check validity of EKU nesting
+- `--check_path_len` - Check validity of path length constraint
+- `--check_name_constraint` - Check name constraints
+- `--check_unknown_critical_exts` - Check for unknown critical extensions
+  (replaces `--ignore_unknown_critical_exts`)
+- `--strict` - Set non-zero exit code for non-fatal errors in parsing
+
+#### sctcheck
+
+The `sctcheck` tool has these new flags:
+
+- `--check_inclusion` - Checks that the SCT was honoured (i.e. the
+  corresponding certificate was included in the issuing CT log)
+
+#### ct_hammer
+
+The `ct_hammer` tool has these new flags:
+
+- `--duplicate_chance` - Allows setting the probability of the hammer sending
+  a duplicate submission.
+
+## v1.0.21 - CTFE Logging / Path Options. Mirroring. RPKI. Non Fatal X.509 error improvements
+
+Published 2018-08-20 10:11:04 +0000 UTC
+
+### CTFE
+
+`CTFE` no longer prints certificate chains as long byte strings in messages when handler errors occur. This was obscuring the reason for the failure and wasn't particularly useful.
+
+`CTFE` now has a global log URL path prefix flag and a configuration proto for a log-specific path. The latter should help for various migration strategies if existing C++ server logs are going to be converted to run on the new code.
+
+### Mirroring
+
+More progress has been made on log mirroring. We believe that it's now at the point where testing can begin.
+
+### Utilities / Libraries
+
+The `certcheck` and `ct_hammer` utilities have received more enhancements.
+
+`x509` and `x509util` now support Subject Information Access and additional extensions for [RPKI / RFC 3779](https://www.ietf.org/rfc/rfc3779.txt).
+
+`scanner` / `fixchain` and some other command line utilities now have better handling of non-fatal errors.
+
+Commit [3629d6846518309d22c16fee15d1007262a459d2](https://api.github.com/repos/google/certificate-transparency-go/commits/3629d6846518309d22c16fee15d1007262a459d2) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.21)
+
+## v1.0.20 - Minimal Gossip / Go 1.11 Fix / Utility Improvements
+
+Published 2018-07-05 09:21:34 +0000 UTC
+
+Enhancements have been made to various utilities including `scanner`, `sctcheck`, `loglist` and `x509util`.
+
+The `allow_verification_with_non_compliant_keys` flag has been removed from `signatures.go`.
+
+An implementation of Gossip has been added. See the `gossip/minimal` package for more information.
+
+An X.509 compatibility issue for Go 1.11 has been fixed. This should be backwards compatible with 1.10.
+
+Commit [37a384cd035e722ea46e55029093e26687138edf](https://api.github.com/repos/google/certificate-transparency-go/commits/37a384cd035e722ea46e55029093e26687138edf) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.20)
+
+## v1.0.19 - CTFE User Quota
+
+Published 2018-06-01 13:51:52 +0000 UTC
+
+CTFE now supports Trillian Log's explicit quota API; quota can be requested based on the remote user's IP, as well as per-issuing certificate in submitted chains.
+
+Commit [8736a411b4ff214ea20687e46c2b67d66ebd83fc](https://api.github.com/repos/google/certificate-transparency-go/commits/8736a411b4ff214ea20687e46c2b67d66ebd83fc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.19)
+
+## v1.0.18 - Adding Migration Tool / Client Additions / K8 Config
+
+Published 2018-06-01 14:28:20 +0000 UTC
+
+Work on a log migration tool (Migrillian) is in progress. This is not yet ready for production use but will provide features for mirroring and migrating logs.
+
+The `RequestLog` API allows for logging of SCTs when they are issued by CTFE.
+
+The CT Go client now supports `GetEntryAndProof`. Utilities have been switched over to use the `glog` package.
+
+Commit [77abf2dac5410a62c04ac1c662c6d0fa54afc2dc](https://api.github.com/repos/google/certificate-transparency-go/commits/77abf2dac5410a62c04ac1c662c6d0fa54afc2dc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.18)
+
+## v1.0.17 - Merkle verification / Tracing / Demo script / CORS
+
+Published 2018-06-01 14:25:16 +0000 UTC
+
+Now uses Merkle Tree verification from Trillian.
+
+The CT server now supports CORS.
+
+Request tracing was added using OpenCensus. For GCE / K8 it just requires the flag to be enabled to export traces to Stackdriver. Other environments may differ.
+
+A demo script was added that goes through setting up a simple deployment suitable for development / demo purposes. This may be useful for those new to the project.
+
+Commit [3c3d22ce946447d047a03228ebb4a41e3e4eb15b](https://api.github.com/repos/google/certificate-transparency-go/commits/3c3d22ce946447d047a03228ebb4a41e3e4eb15b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.17)
+
+## v1.0.16 - Lifecycle test / Go 1.10.1
+
+Published 2018-06-01 14:22:23 +0000 UTC
+
+An integration test was added that goes through a create / drain queue / freeze lifecycle for a log.
+
+Changes to `x509` were merged from Go 1.10.1.
+
+Commit [a72423d09b410b80673fd1135ba1022d04bac6cd](https://api.github.com/repos/google/certificate-transparency-go/commits/a72423d09b410b80673fd1135ba1022d04bac6cd) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.16)
+
+## v1.0.15 - More control of verification, grpclb, stackdriver metrics
+
+Published 2018-06-01 14:20:32 +0000 UTC
+
+Facilities were added to the `x509` package to control whether verification checks are applied.
+
+Log server requests are now balanced using `gRPClb`.
+
+For Kubernetes, metrics can be published to Stackdriver monitoring.
+
+Commit [684d6eee6092774e54d301ccad0ed61bc8d010c1](https://api.github.com/repos/google/certificate-transparency-go/commits/684d6eee6092774e54d301ccad0ed61bc8d010c1) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.15)
+
+## v1.0.14 - SQLite Removed, LeafHashForLeaf
+
+Published 2018-06-01 14:15:37 +0000 UTC
+
+Support for SQLite was removed. The motivation was ongoing test flakiness caused by multi-user access. This database may work for an embedded scenario but is not suitable for use in a server environment.
+
+A `LeafHashForLeaf` client API was added and is now used by the CT client and integration tests.
+
+Commit [698cd6a661196db4b2e71437422178ffe8705006](https://api.github.com/repos/google/certificate-transparency-go/commits/698cd6a661196db4b2e71437422178ffe8705006) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.14)
+
+## v1.0.13 - Crypto changes, util updates, sync with trillian repo, loglist verification
+
+Published 2018-06-01 14:15:21 +0000 UTC
+
+Some of our custom crypto packages that were wrapping calls to the standard library have been removed, and the underlying features are now used directly.
+
+Updates were made to GCE ingress and health checks.
+
+The log list utility can verify signatures.
+
+Commit [480c3654a70c5383b9543ec784203030aedbd3a5](https://api.github.com/repos/google/certificate-transparency-go/commits/480c3654a70c5383b9543ec784203030aedbd3a5) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.13)
+
+## v1.0.12 - Client / util updates & CTFE fixes
+
+Published 2018-06-01 14:13:42 +0000 UTC
+
+The CT client can now use a JSON loglist to find logs.
+
+CTFE had a fix applied for preissued precerts.
+
+A DNS client was added and the CT client was extended to support DNS retrieval.
+
+Commit [74c06c95e0b304a050a1c33764c8a01d653a16e3](https://api.github.com/repos/google/certificate-transparency-go/commits/74c06c95e0b304a050a1c33764c8a01d653a16e3) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.12)
+
+## v1.0.11 - Kubernetes CI / Integration fixes
+
+Published 2018-06-01 14:12:18 +0000 UTC
+
+Updates to Kubernetes configs, mostly related to running a CI instance.
+
+Commit [0856acca7e0ab7f082ae83a1fbb5d21160962efc](https://api.github.com/repos/google/certificate-transparency-go/commits/0856acca7e0ab7f082ae83a1fbb5d21160962efc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.11)
+
+## v1.0.10 - More scanner, x509, utility and client fixes. CTFE updates
+
+Published 2018-06-01 14:09:47 +0000 UTC
+
+The CT client was using the wrong protobuffer library package. To guard against this in future, a check has been added to our lint config.
+
+The `x509` and `asn1` packages have had upstream fixes applied from Go 1.10rc1.
+
+Commit [1bec4527572c443752ad4f2830bef88be0533236](https://api.github.com/repos/google/certificate-transparency-go/commits/1bec4527572c443752ad4f2830bef88be0533236) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.10)
+
+## v1.0.9 - Scanner, x509, utility and client fixes
+
+Published 2018-06-01 14:11:13 +0000 UTC
+
+The `scanner` utility now displays throughput stats.
+
+Build instructions and README files were updated.
+
+The `certcheck` utility can be told to ignore unknown critical X.509 extensions.
+
+Commit [c06833528d04a94eed0c775104d1107bab9ae17c](https://api.github.com/repos/google/certificate-transparency-go/commits/c06833528d04a94eed0c775104d1107bab9ae17c) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.9)
+
+## v1.0.8 - Client fixes, align with trillian repo
+
+Published 2018-06-01 14:06:44 +0000 UTC
+
+
+
+Commit [e8b02c60f294b503dbb67de0868143f5d4935e56](https://api.github.com/repos/google/certificate-transparency-go/commits/e8b02c60f294b503dbb67de0868143f5d4935e56) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.8)
+
+## v1.0.7 - CTFE fixes
+
+Published 2018-06-01 14:06:13 +0000 UTC
+
+An issue was fixed with CTFE signature caching. In an unlikely set of circumstances this could lead to log mis-operation.
+While the chances of this are small, we recommend that versions prior to this one are not deployed.
+
+Commit [52c0590bd3b4b80c5497005b0f47e10557425eeb](https://api.github.com/repos/google/certificate-transparency-go/commits/52c0590bd3b4b80c5497005b0f47e10557425eeb) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.7)
+
+## v1.0.6 - crlcheck improvements / other fixes
+
+Published 2018-06-01 14:04:22 +0000 UTC
+
+The `crlcheck` utility has had several fixes and enhancements. Additionally, the `hammer` now supports temporal logs.
+
+Commit [3955e4a00c42e83ff17ce25003976159c5d0f0f9](https://api.github.com/repos/google/certificate-transparency-go/commits/3955e4a00c42e83ff17ce25003976159c5d0f0f9) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.6)
+
+## v1.0.5 - X509 and asn1 fixes
+
+Published 2018-06-01 14:02:58 +0000 UTC
+
+This release is mostly fixes to the `x509` and `asn1` packages. Some command line utilities were also updated.
+
+Commit [ae40d07cce12f1227c6e658e61c9dddb7646f97b](https://api.github.com/repos/google/certificate-transparency-go/commits/ae40d07cce12f1227c6e658e61c9dddb7646f97b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.5)
+
+## v1.0.4 - Multi log backend configs
+
+Published 2018-06-01 14:02:07 +0000 UTC
+
+Support was added to allow CTFE to use multiple backends, each serving a distinct set of logs. It allows for e.g. regional backend deployment with common frontend servers.
+
+Commit [62023ed90b41fa40854957b5dec7d9d73594723f](https://api.github.com/repos/google/certificate-transparency-go/commits/62023ed90b41fa40854957b5dec7d9d73594723f) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.4)
+
+## v1.0.3 - Hammer updates, use standard context
+
+Published 2018-06-01 14:01:11 +0000 UTC
+
+After the Go 1.9 migration, references to anything other than the standard `context` package have been removed. This is the only context package that should be used from now on.
+
+Commit [b28beed8b9aceacc705e0ff4a11d435a310e3d97](https://api.github.com/repos/google/certificate-transparency-go/commits/b28beed8b9aceacc705e0ff4a11d435a310e3d97) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.3)
+
+## v1.0.2 - Go 1.9
+
+Published 2018-06-01 14:00:00 +0000 UTC
+
+Go 1.9 is now required to build the code.
+
+Commit [3aed33d672ee43f04b1e8a00b25ca3e2e2e74309](https://api.github.com/repos/google/certificate-transparency-go/commits/3aed33d672ee43f04b1e8a00b25ca3e2e2e74309) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.2)
+
+## v1.0.1 - Hammer and client improvements
+
+Published 2018-06-01 13:59:29 +0000 UTC
+
+
+
+Commit [c28796cc21776667fb05d6300e32d9517be96515](https://api.github.com/repos/google/certificate-transparency-go/commits/c28796cc21776667fb05d6300e32d9517be96515) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.1)
+
+## v1.0 - First Trillian CT Release
+
+Published 2018-06-01 13:59:00 +0000 UTC
+
+This is the point that corresponds to the 1.0 release in the trillian repo.
+
+Commit [abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d](https://api.github.com/repos/google/certificate-transparency-go/commits/abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0)
diff --git a/vendor/github.com/google/certificate-transparency-go/CODEOWNERS b/vendor/github.com/google/certificate-transparency-go/CODEOWNERS
new file mode 100644
index 00000000000..0c931e87ce2
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/CODEOWNERS
@@ -0,0 +1 @@
+* @google/certificate-transparency
diff --git a/vendor/github.com/google/certificate-transparency-go/CONTRIBUTING.md b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTING.md
new file mode 100644
index 00000000000..43de4c9d470
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTING.md
@@ -0,0 +1,58 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement. This is not a copyright **assignment**; it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+ * If you are an individual writing original source code and you're sure you
+   own the intellectual property, then you'll need to sign an [individual
+   CLA][].
+
+ * If you work for a company that wants to allow you to contribute your work,
+   then you'll need to sign a [corporate CLA][].
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+Once your CLA is submitted (or if you already submitted one for
+another Google project), make a commit adding yourself to the
+[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
+of your first [pull request][].
+
+[AUTHORS]: AUTHORS
+[CONTRIBUTORS]: CONTRIBUTORS
+
+
+## Submitting a patch ##
+
+ 1. It's generally best to start by opening a new issue describing the bug or
+    feature you're intending to fix. Even if you think it's relatively minor,
+    it's helpful to know what people are working on. Mention in the initial
+    issue that you are planning to work on that bug or feature so that it can
+    be assigned to you.
+
+ 1. Follow the normal process of [forking][] the project, and set up a new
+    branch to work in. It's important that each group of changes be done in
+    separate branches in order to ensure that a pull request only includes the
+    commits related to that bug or feature.
+
+ 1. Do your best to have [well-formed commit messages][] for each change.
+    This provides consistency throughout the project, and ensures that commit
+    messages can be formatted properly by various git tools.
+
+ 1. Finally, push the commits to your fork and submit a [pull request][].
+
+[forking]: https://help.github.com/articles/fork-a-repo
+[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS
new file mode 100644
index 00000000000..3a98a7e1ef5
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS
@@ -0,0 +1,62 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+#
+# Names should be added to this file as:
+#     Name <email address>
+#
+# Please keep the list sorted.
+
+Adam Eijdenberg
+Al Cutter
+Alex Cohn
+Ben Laurie
+Chris Kennelly
+David Drysdale
+Deyan Bektchiev
+Ed Maste
+Elisha Silas
+Emilia Kasper
+Eran Messeri
+Fiaz Hossain
+Gary Belvin
+Jeff Trawick
+Joe Tsai
+Kat Joyce
+Katriel Cohn-Gordon
+Kiril Nikolov
+Konrad Kraszewski
+Laël Cellier
+Linus Nordberg
+Mark Schloesser
+Nicholas Galbreath
+Oliver Weidner
+Pascal Leroy
+Paul Hadfield
+Paul Lietar
+Pavel Kalinnikov
+Pierre Phaneuf
+Rob Percival
+Rob Stradling
+Roger Ng
+Roland Shoemaker
+Ruslan Kovalov
+Samuel Lidén Borell
+Tatiana Merkulova
+Vladimir Rutsky
+Ximin Luo
diff --git a/vendor/github.com/google/certificate-transparency-go/LICENSE b/vendor/github.com/google/certificate-transparency-go/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..c3c0feb3abc --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,16 @@ + + +### Checklist + + + +- [ ] I have updated the [CHANGELOG](CHANGELOG.md). + - Adjust the draft version number according to [semantic versioning](https://semver.org/) rules. +- [ ] I have updated [documentation](docs/) accordingly. 
diff --git a/vendor/github.com/google/certificate-transparency-go/README.md b/vendor/github.com/google/certificate-transparency-go/README.md new file mode 100644 index 00000000000..bade700508e --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/README.md @@ -0,0 +1,120 @@ +# Certificate Transparency: Go Code + +[![Go Report Card](https://goreportcard.com/badge/github.com/google/certificate-transparency-go)](https://goreportcard.com/report/github.com/google/certificate-transparency-go) +[![GoDoc](https://godoc.org/github.com/google/certificate-transparency-go?status.svg)](https://godoc.org/github.com/google/certificate-transparency-go) +![CodeQL workflow](https://github.com/google/certificate-transparency-go/actions/workflows/codeql.yml/badge.svg) + +This repository holds Go code related to +[Certificate Transparency](https://www.certificate-transparency.org/) (CT). The +repository requires Go version 1.23. + + - [Repository Structure](#repository-structure) + - [Trillian CT Personality](#trillian-ct-personality) + - [Working on the Code](#working-on-the-code) + - [Running Codebase Checks](#running-codebase-checks) + - [Rebuilding Generated Code](#rebuilding-generated-code) + +## Support + +- Slack: https://transparency-dev.slack.com/ ([invitation](https://join.slack.com/t/transparency-dev/shared_invite/zt-27pkqo21d-okUFhur7YZ0rFoJVIOPznQ)) + +## Repository Structure + +The main parts of the repository are: + + - Encoding libraries: + - `asn1/` and `x509/` are forks of the upstream Go `encoding/asn1` and + `crypto/x509` libraries. We maintain separate forks of these packages + because CT is intended to act as an observatory of certificates across the + ecosystem; as such, we need to be able to process somewhat-malformed + certificates that the stricter upstream code would (correctly) reject. + Our `x509` fork also includes code for working with the + [pre-certificates defined in RFC 6962](https://tools.ietf.org/html/rfc6962#section-3.1). + - `tls` holds a library for processing TLS-encoded data as described in + [RFC 5246](https://tools.ietf.org/html/rfc5246). + - `x509util/` provides additional utilities for dealing with + `x509.Certificate`s. + - CT client libraries: + - The top-level `ct` package (in `.`) holds types and utilities for working + with CT data structures defined in + [RFC 6962](https://tools.ietf.org/html/rfc6962). + - `client/` and `jsonclient/` hold libraries that allow access to CT Logs + via HTTP entrypoints described in + [section 4 of RFC 6962](https://tools.ietf.org/html/rfc6962#section-4). + - `dnsclient/` has a library that allows access to CT Logs over + [DNS](https://github.com/google/certificate-transparency-rfcs/blob/master/dns/draft-ct-over-dns.md). + - `scanner/` holds a library for scanning the entire contents of an existing + CT Log. + - CT Personality for [Trillian](https://github.com/google/trillian): + - `trillian/` holds code that allows a Certificate Transparency Log to be + run using a Trillian Log as its back-end -- see + [below](#trillian-ct-personality). + - Command line tools: + - `./client/ctclient` allows interaction with a CT Log. + - `./ctutil/sctcheck` allows SCTs (signed certificate timestamps) from a CT + Log to be verified. + - `./scanner/scanlog` allows an existing CT Log to be scanned for certificates + of interest; please be polite when running this tool against a Log. 
+ - `./x509util/certcheck` allows display and verification of certificates + - `./x509util/crlcheck` allows display and verification of certificate + revocation lists (CRLs). + - Other libraries related to CT: + - `ctutil/` holds utility functions for validating and verifying CT data + structures. + - `loglist3/` has a library for reading + [v3 JSON lists of CT Logs](https://groups.google.com/a/chromium.org/g/ct-policy/c/IdbrdAcDQto/m/i5KPyzYwBAAJ). + + +## Trillian CT Personality + +The `trillian/` subdirectory holds code and scripts for running a CT Log based +on the [Trillian](https://github.com/google/trillian) general transparency Log, +and is [documented separately](trillian/README.md). + + +## Working on the Code + +Developers who want to make changes to the codebase need some additional +dependencies and tools, described in the following sections. + +### Running Codebase Checks + +The [`scripts/presubmit.sh`](scripts/presubmit.sh) script runs various tools +and tests over the codebase; please ensure this script passes before sending +pull requests for review. + +```bash +# Install golangci-lint +go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.61.0 + +# Run code generation, build, test and linters +./scripts/presubmit.sh + +# Run build, test and linters but skip code generation +./scripts/presubmit.sh --no-generate + +# Or just run the linters alone: +golangci-lint run +``` + +### Rebuilding Generated Code + +Some of the CT Go code is autogenerated from other files: + +- [Protocol buffer](https://developers.google.com/protocol-buffers/) message + definitions are converted to `.pb.go` implementations. +- A mock implementation of the Trillian gRPC API (in `trillian/mockclient`) is + created with [GoMock](https://github.com/golang/mock). + +Re-generating mock or protobuffer files is only needed if you're changing +the original files; if you do, you'll need to install the prerequisites: + +- tools written in `go` can be installed with a single run of `go install` + (courtesy of [`tools.go`](./tools/tools.go) and `go.mod`). +- `protoc` tool: you'll need [version 3.20.1](https://github.com/protocolbuffers/protobuf/releases/tag/v3.20.1) installed, and `PATH` updated to include its `bin/` directory. + +With tools installed, run the following: + +```bash +go generate -x ./... # hunts for //go:generate comments and runs them +``` diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/README.md b/vendor/github.com/google/certificate-transparency-go/asn1/README.md new file mode 100644 index 00000000000..a42ac4ebe33 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/asn1/README.md @@ -0,0 +1,7 @@ +# Important Notice + +This is a fork of the `encoding/asn1` Go package. The original source can be found on +[GitHub](https://github.com/golang/go). + +Be careful about making local modifications to this code as it will +make maintenance harder in future. diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go b/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go new file mode 100644 index 00000000000..aaca5fd2606 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go @@ -0,0 +1,1195 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 implements parsing of DER-encoded ASN.1 data structures, +// as defined in ITU-T Rec X.690. 
+// +// See also “A Layman's Guide to a Subset of ASN.1, BER, and DER,” +// http://luca.ntop.org/Teaching/Appunti/asn1.html. +// +// This is a fork of the Go standard library ASN.1 implementation +// (encoding/asn1), with the aim of relaxing checks for various things +// that are common errors present in many X.509 certificates in the +// wild. +// +// Main differences: +// - Extra "lax" tag that recursively applies and relaxes some strict +// checks: +// - parsePrintableString() copes with invalid PrintableString contents, +// e.g. use of tagPrintableString when the string data is really +// ISO8859-1. +// - checkInteger() allows integers that are not minimally encoded (and +// so are not correct DER). +// - parseObjectIdentifier() allows zero-length OIDs. +// - Better diagnostics on which particular field causes errors. +package asn1 + +// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc +// are different encoding formats for those objects. Here, we'll be dealing +// with DER, the Distinguished Encoding Rules. DER is used in X.509 because +// it's fast to parse and, unlike BER, has a unique encoding for every object. +// When calculating hashes over objects, it's important that the resulting +// bytes be the same at both ends and DER removes this margin of error. +// +// ASN.1 is very complex and this package doesn't attempt to implement +// everything by any means. + +import ( + "errors" + "fmt" + "math" + "math/big" + "reflect" + "strconv" + "time" + "unicode/utf16" + "unicode/utf8" +) + +// A StructuralError suggests that the ASN.1 data is valid, but the Go type +// which is receiving it doesn't match. +type StructuralError struct { + Msg string + Field string +} + +func (e StructuralError) Error() string { + var prefix string + if e.Field != "" { + prefix = e.Field + ": " + } + return "asn1: structure error: " + prefix + e.Msg +} + +// A SyntaxError suggests that the ASN.1 data is invalid. +type SyntaxError struct { + Msg string + Field string +} + +func (e SyntaxError) Error() string { + var prefix string + if e.Field != "" { + prefix = e.Field + ": " + } + return "asn1: syntax error: " + prefix + e.Msg +} + +// We start by dealing with each of the primitive types in turn. + +// BOOLEAN + +func parseBool(bytes []byte, fieldName string) (ret bool, err error) { + if len(bytes) != 1 { + err = SyntaxError{"invalid boolean", fieldName} + return + } + + // DER demands that "If the encoding represents the boolean value TRUE, + // its single contents octet shall have all eight bits set to one." + // Thus only 0 and 255 are valid encoded values. + switch bytes[0] { + case 0: + ret = false + case 0xff: + ret = true + default: + err = SyntaxError{"invalid boolean", fieldName} + } + + return +} + +// INTEGER + +// checkInteger returns nil if the given bytes are a valid DER-encoded +// INTEGER and an error otherwise. +func checkInteger(bytes []byte, lax bool, fieldName string) error { + if len(bytes) == 0 { + return StructuralError{"empty integer", fieldName} + } + if len(bytes) == 1 { + return nil + } + if lax { + return nil + } + if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) { + return StructuralError{"integer not minimally-encoded", fieldName} + } + return nil +} + +// parseInt64 treats the given bytes as a big-endian, signed integer and +// returns the result. 
+func parseInt64(bytes []byte, lax bool, fieldName string) (ret int64, err error) {
+	err = checkInteger(bytes, lax, fieldName)
+	if err != nil {
+		return
+	}
+	if len(bytes) > 8 {
+		// We'll overflow an int64 in this case.
+		err = StructuralError{"integer too large", fieldName}
+		return
+	}
+	for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+		ret <<= 8
+		ret |= int64(bytes[bytesRead])
+	}
+
+	// Shift up and down in order to sign extend the result.
+	ret <<= 64 - uint8(len(bytes))*8
+	ret >>= 64 - uint8(len(bytes))*8
+	return
+}
+
+// parseInt32 treats the given bytes as a big-endian, signed integer and
+// returns the result.
+func parseInt32(bytes []byte, lax bool, fieldName string) (int32, error) {
+	if err := checkInteger(bytes, lax, fieldName); err != nil {
+		return 0, err
+	}
+	ret64, err := parseInt64(bytes, lax, fieldName)
+	if err != nil {
+		return 0, err
+	}
+	if ret64 != int64(int32(ret64)) {
+		return 0, StructuralError{"integer too large", fieldName}
+	}
+	return int32(ret64), nil
+}
+
+var bigOne = big.NewInt(1)
+
+// parseBigInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseBigInt(bytes []byte, lax bool, fieldName string) (*big.Int, error) {
+	if err := checkInteger(bytes, lax, fieldName); err != nil {
+		return nil, err
+	}
+	ret := new(big.Int)
+	if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
+		// This is a negative number.
+		notBytes := make([]byte, len(bytes))
+		for i := range notBytes {
+			notBytes[i] = ^bytes[i]
+		}
+		ret.SetBytes(notBytes)
+		ret.Add(ret, bigOne)
+		ret.Neg(ret)
+		return ret, nil
+	}
+	ret.SetBytes(bytes)
+	return ret, nil
+}
+
+// BIT STRING
+
+// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
+// bit string is padded up to the nearest byte in memory and the number of
+// valid bits is recorded. Padding bits will be zero.
+type BitString struct {
+	Bytes     []byte // bits packed into bytes.
+	BitLength int    // length in bits.
+}
+
+// At returns the bit at the given index. If the index is out of range it
+// returns false.
+func (b BitString) At(i int) int {
+	if i < 0 || i >= b.BitLength {
+		return 0
+	}
+	x := i / 8
+	y := 7 - uint(i%8)
+	return int(b.Bytes[x]>>y) & 1
+}
+
+// RightAlign returns a slice where the padding bits are at the beginning. The
+// slice may share memory with the BitString.
+func (b BitString) RightAlign() []byte {
+	shift := uint(8 - (b.BitLength % 8))
+	if shift == 8 || len(b.Bytes) == 0 {
+		return b.Bytes
+	}
+
+	a := make([]byte, len(b.Bytes))
+	a[0] = b.Bytes[0] >> shift
+	for i := 1; i < len(b.Bytes); i++ {
+		a[i] = b.Bytes[i-1] << (8 - shift)
+		a[i] |= b.Bytes[i] >> shift
+	}
+
+	return a
+}
+
+// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
+func parseBitString(bytes []byte, fieldName string) (ret BitString, err error) {
+	if len(bytes) == 0 {
+		err = SyntaxError{"zero length BIT STRING", fieldName}
+		return
+	}
+	paddingBits := int(bytes[0])
+	if paddingBits > 7 ||
+		len(bytes) == 1 && paddingBits > 0 ||
+		bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
+		err = SyntaxError{"invalid padding bits in BIT STRING", fieldName}
+		return
+	}
+	ret.BitLength = (len(bytes)-1)*8 - paddingBits
+	ret.Bytes = bytes[1:]
+	return
+}
+
+// NULL
+
+// NullRawValue is a RawValue with its Tag set to the ASN.1 NULL type tag (5).
+var NullRawValue = RawValue{Tag: TagNull}
+
+// NullBytes contains bytes representing the DER-encoded ASN.1 NULL type.
+var NullBytes = []byte{TagNull, 0x00}
+
+// OBJECT IDENTIFIER
+
+// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
+type ObjectIdentifier []int
+
+// Equal reports whether oi and other represent the same identifier.
+func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
+	if len(oi) != len(other) {
+		return false
+	}
+	for i := 0; i < len(oi); i++ {
+		if oi[i] != other[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (oi ObjectIdentifier) String() string {
+	var s string
+
+	for i, v := range oi {
+		if i > 0 {
+			s += "."
+		}
+		s += strconv.Itoa(v)
+	}
+
+	return s
+}
+
+// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
+// returns it. An object identifier is a sequence of variable length integers
+// that are assigned in a hierarchy.
+func parseObjectIdentifier(bytes []byte, lax bool, fieldName string) (s ObjectIdentifier, err error) { + if len(bytes) == 0 { + if lax { + return ObjectIdentifier{}, nil + } + err = SyntaxError{"zero length OBJECT IDENTIFIER", fieldName} + return + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + s = make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + v, offset, err := parseBase128Int(bytes, 0, fieldName) + if err != nil { + return + } + if v < 80 { + s[0] = v / 40 + s[1] = v % 40 + } else { + s[0] = 2 + s[1] = v - 80 + } + + i := 2 + for ; offset < len(bytes); i++ { + v, offset, err = parseBase128Int(bytes, offset, fieldName) + if err != nil { + return + } + s[i] = v + } + s = s[0:i] + return +} + +// ENUMERATED + +// An Enumerated is represented as a plain int. +type Enumerated int + +// FLAG + +// A Flag accepts any data and is set to true if present. +type Flag bool + +// parseBase128Int parses a base-128 encoded int from the given offset in the +// given byte slice. It returns the value and the new offset. +func parseBase128Int(bytes []byte, initOffset int, fieldName string) (ret, offset int, err error) { + offset = initOffset + var ret64 int64 + for shifted := 0; offset < len(bytes); shifted++ { + // 5 * 7 bits per byte == 35 bits of data + // Thus the representation is either non-minimal or too large for an int32 + if shifted == 5 { + err = StructuralError{"base 128 integer too large", fieldName} + return + } + ret64 <<= 7 + b := bytes[offset] + ret64 |= int64(b & 0x7f) + offset++ + if b&0x80 == 0 { + ret = int(ret64) + // Ensure that the returned value fits in an int on all platforms + if ret64 > math.MaxInt32 { + err = StructuralError{"base 128 integer too large", fieldName} + } + return + } + } + err = SyntaxError{"truncated base 128 integer", fieldName} + return +} + +// UTCTime + +func parseUTCTime(bytes []byte) (ret time.Time, err error) { + s := string(bytes) + + formatStr := "0601021504Z0700" + ret, err = time.Parse(formatStr, s) + if err != nil { + formatStr = "060102150405Z0700" + ret, err = time.Parse(formatStr, s) + } + if err != nil { + return + } + + if serialized := ret.Format(formatStr); serialized != s { + err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) + return + } + + if ret.Year() >= 2050 { + // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + ret = ret.AddDate(-100, 0, 0) + } + + return +} + +// parseGeneralizedTime parses the GeneralizedTime from the given byte slice +// and returns the resulting time. +func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) { + const formatStr = "20060102150405Z0700" + s := string(bytes) + + if ret, err = time.Parse(formatStr, s); err != nil { + return + } + + if serialized := ret.Format(formatStr); serialized != s { + err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) + } + + return +} + +// NumericString + +// parseNumericString parses an ASN.1 NumericString from the given byte array +// and returns it. 
+func parseNumericString(bytes []byte, fieldName string) (ret string, err error) { + for _, b := range bytes { + if !isNumeric(b) { + return "", SyntaxError{"NumericString contains invalid character", fieldName} + } + } + return string(bytes), nil +} + +// isNumeric reports whether the given b is in the ASN.1 NumericString set. +func isNumeric(b byte) bool { + return '0' <= b && b <= '9' || + b == ' ' +} + +// PrintableString + +// parsePrintableString parses an ASN.1 PrintableString from the given byte +// array and returns it. +func parsePrintableString(bytes []byte, lax bool, fieldName string) (ret string, err error) { + for _, b := range bytes { + if !isPrintable(b, allowAsterisk, allowAmpersand) { + if !lax { + err = SyntaxError{"PrintableString contains invalid character", fieldName} + } else { + // Might be an ISO8859-1 string stuffed in, check if it + // would be valid and assume that's what's happened if so, + // otherwise try T.61, failing that give up and just assign + // the bytes + switch { + case couldBeISO8859_1(bytes): + ret, err = iso8859_1ToUTF8(bytes), nil + case couldBeT61(bytes): + ret, err = parseT61String(bytes) + default: + err = SyntaxError{"PrintableString contains invalid character, couldn't determine correct String type", fieldName} + } + } + return + } + } + ret = string(bytes) + return +} + +type asteriskFlag bool +type ampersandFlag bool + +const ( + allowAsterisk asteriskFlag = true + rejectAsterisk asteriskFlag = false + + allowAmpersand ampersandFlag = true + rejectAmpersand ampersandFlag = false +) + +// isPrintable reports whether the given b is in the ASN.1 PrintableString set. +// If asterisk is allowAsterisk then '*' is also allowed, reflecting existing +// practice. If ampersand is allowAmpersand then '&' is allowed as well. +func isPrintable(b byte, asterisk asteriskFlag, ampersand ampersandFlag) bool { + return 'a' <= b && b <= 'z' || + 'A' <= b && b <= 'Z' || + '0' <= b && b <= '9' || + '\'' <= b && b <= ')' || + '+' <= b && b <= '/' || + b == ' ' || + b == ':' || + b == '=' || + b == '?' || + // This is technically not allowed in a PrintableString. + // However, x509 certificates with wildcard strings don't + // always use the correct string type so we permit it. + (bool(asterisk) && b == '*') || + // This is not technically allowed either. However, not + // only is it relatively common, but there are also a + // handful of CA certificates that contain it. At least + // one of which will not expire until 2027. + (bool(ampersand) && b == '&') +} + +// IA5String + +// parseIA5String parses an ASN.1 IA5String (ASCII string) from the given +// byte slice and returns it. +func parseIA5String(bytes []byte, fieldName string) (ret string, err error) { + for _, b := range bytes { + if b >= utf8.RuneSelf { + err = SyntaxError{"IA5String contains invalid character", fieldName} + return + } + } + ret = string(bytes) + return +} + +// T61String + +// parseT61String parses an ASN.1 T61String (8-bit clean string) from the given +// byte slice and returns it. +func parseT61String(bytes []byte) (ret string, err error) { + return string(bytes), nil +} + +// UTF8String + +// parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte +// array and returns it. 
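+//
+// An illustrative sketch:
+//
+//	s, err := parseUTF8String([]byte("caf\xc3\xa9")) // s == "café", err == nil
+//	_, err = parseUTF8String([]byte{0xff, 0xfe})     // err != nil: not valid UTF-8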
+func parseUTF8String(bytes []byte) (ret string, err error) { + if !utf8.Valid(bytes) { + return "", errors.New("asn1: invalid UTF-8 string") + } + return string(bytes), nil +} + +// BMPString + +// parseBMPString parses an ASN.1 BMPString (Basic Multilingual Plane of +// ISO/IEC/ITU 10646-1) from the given byte slice and returns it. +func parseBMPString(bmpString []byte) (string, error) { + if len(bmpString)%2 != 0 { + return "", errors.New("pkcs12: odd-length BMP string") + } + + // Strip terminator if present. + if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { + bmpString = bmpString[:l-2] + } + + s := make([]uint16, 0, len(bmpString)/2) + for len(bmpString) > 0 { + s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) + bmpString = bmpString[2:] + } + + return string(utf16.Decode(s)), nil +} + +// A RawValue represents an undecoded ASN.1 object. +type RawValue struct { + Class, Tag int + IsCompound bool + Bytes []byte + FullBytes []byte // includes the tag and length +} + +// RawContent is used to signal that the undecoded, DER data needs to be +// preserved for a struct. To use it, the first field of the struct must have +// this type. It's an error for any of the other fields to have this type. +type RawContent []byte + +// Tagging + +// parseTagAndLength parses an ASN.1 tag and length pair from the given offset +// into a byte slice. It returns the parsed data and the new offset. SET and +// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we +// don't distinguish between ordered and unordered objects in this code. +func parseTagAndLength(bytes []byte, initOffset int, fieldName string) (ret tagAndLength, offset int, err error) { + offset = initOffset + // parseTagAndLength should not be called without at least a single + // byte to read. Thus this check is for robustness: + if offset >= len(bytes) { + err = errors.New("asn1: internal error in parseTagAndLength") + return + } + b := bytes[offset] + offset++ + ret.class = int(b >> 6) + ret.isCompound = b&0x20 == 0x20 + ret.tag = int(b & 0x1f) + + // If the bottom five bits are set, then the tag number is actually base 128 + // encoded afterwards + if ret.tag == 0x1f { + ret.tag, offset, err = parseBase128Int(bytes, offset, fieldName) + if err != nil { + return + } + // Tags should be encoded in minimal form. + if ret.tag < 0x1f { + err = SyntaxError{"non-minimal tag", fieldName} + return + } + } + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length", fieldName} + return + } + b = bytes[offset] + offset++ + if b&0x80 == 0 { + // The length is encoded in the bottom 7 bits. + ret.length = int(b & 0x7f) + } else { + // Bottom 7 bits give the number of length bytes to follow. + numBytes := int(b & 0x7f) + if numBytes == 0 { + err = SyntaxError{"indefinite length found (not DER)", fieldName} + return + } + ret.length = 0 + for i := 0; i < numBytes; i++ { + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length", fieldName} + return + } + b = bytes[offset] + offset++ + if ret.length >= 1<<23 { + // We can't shift ret.length up without + // overflowing. + err = StructuralError{"length too large", fieldName} + return + } + ret.length <<= 8 + ret.length |= int(b) + if ret.length == 0 { + // DER requires that lengths be minimal. + err = StructuralError{"superfluous leading zeros in length", fieldName} + return + } + } + // Short lengths must be encoded in short form. 
+ if ret.length < 0x80 { + err = StructuralError{"non-minimal length", fieldName} + return + } + } + + return +} + +// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse +// a number of ASN.1 values from the given byte slice and returns them as a +// slice of Go values of the given type. +func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type, lax bool, fieldName string) (ret reflect.Value, err error) { + matchAny, expectedTag, compoundType, ok := getUniversalType(elemType) + if !ok { + err = StructuralError{"unknown Go type for slice", fieldName} + return + } + + // First we iterate over the input and count the number of elements, + // checking that the types are correct in each case. + numElements := 0 + for offset := 0; offset < len(bytes); { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset, fieldName) + if err != nil { + return + } + switch t.tag { + case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString: + // We pretend that various other string types are + // PRINTABLE STRINGs so that a sequence of them can be + // parsed into a []string. + t.tag = TagPrintableString + case TagGeneralizedTime, TagUTCTime: + // Likewise, both time types are treated the same. + t.tag = TagUTCTime + } + + if !matchAny && (t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag) { + err = StructuralError{fmt.Sprintf("sequence tag mismatch (got:%+v, want:0/%d/%t)", t, expectedTag, compoundType), fieldName} + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"truncated sequence", fieldName} + return + } + offset += t.length + numElements++ + } + ret = reflect.MakeSlice(sliceType, numElements, numElements) + params := fieldParameters{lax: lax} + offset := 0 + for i := 0; i < numElements; i++ { + offset, err = parseField(ret.Index(i), bytes, offset, params) + if err != nil { + return + } + } + return +} + +var ( + bitStringType = reflect.TypeOf(BitString{}) + objectIdentifierType = reflect.TypeOf(ObjectIdentifier{}) + enumeratedType = reflect.TypeOf(Enumerated(0)) + flagType = reflect.TypeOf(Flag(false)) + timeType = reflect.TypeOf(time.Time{}) + rawValueType = reflect.TypeOf(RawValue{}) + rawContentsType = reflect.TypeOf(RawContent(nil)) + bigIntType = reflect.TypeOf(new(big.Int)) +) + +// invalidLength reports whether offset + length > sliceLength, or if the +// addition would overflow. +func invalidLength(offset, length, sliceLength int) bool { + return offset+length < offset || offset+length > sliceLength +} + +// Tests whether the data in |bytes| would be a valid ISO8859-1 string. +// Clearly, a sequence of bytes comprised solely of valid ISO8859-1 +// codepoints does not imply that the encoding MUST be ISO8859-1, rather that +// you would not encounter an error trying to interpret the data as such. +func couldBeISO8859_1(bytes []byte) bool { + for _, b := range bytes { + if b < 0x20 || (b >= 0x7F && b < 0xA0) { + return false + } + } + return true +} + +// Checks whether the data in |bytes| would be a valid T.61 string. +// Clearly, a sequence of bytes comprised solely of valid T.61 +// codepoints does not imply that the encoding MUST be T.61, rather that +// you would not encounter an error trying to interpret the data as such. 
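+//
+// A couple of illustrative cases:
+//
+//	couldBeT61([]byte("hello"))   // true: no excluded code points
+//	couldBeT61([]byte{'a', 0x5C}) // false: 0x5C ('\') is not a T.61 code point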
+func couldBeT61(bytes []byte) bool { + for _, b := range bytes { + switch b { + case 0x00: + // Since we're guessing at (incorrect) encodings for a + // PrintableString, we'll err on the side of caution and disallow + // strings with a NUL in them, don't want to re-create a PayPal NUL + // situation in monitors. + fallthrough + case 0x23, 0x24, 0x5C, 0x5E, 0x60, 0x7B, 0x7D, 0x7E, 0xA5, 0xA6, 0xAC, 0xAD, 0xAE, 0xAF, + 0xB9, 0xBA, 0xC0, 0xC9, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, + 0xDA, 0xDB, 0xDC, 0xDE, 0xDF, 0xE5, 0xFF: + // These are all invalid code points in T.61, so it can't be a T.61 string. + return false + } + } + return true +} + +// Converts the data in |bytes| to the equivalent UTF-8 string. +func iso8859_1ToUTF8(bytes []byte) string { + buf := make([]rune, len(bytes)) + for i, b := range bytes { + buf[i] = rune(b) + } + return string(buf) +} + +// parseField is the main parsing function. Given a byte slice and an offset +// into the array, it will try to parse a suitable ASN.1 value out and store it +// in the given Value. +func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) { + offset = initOffset + fieldType := v.Type() + + // If we have run out of data, it may be that there are optional elements at the end. + if offset == len(bytes) { + if !setDefaultValue(v, params) { + err = SyntaxError{"sequence truncated", params.name} + } + return + } + + // Deal with the ANY type. + if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset, params.name) + if err != nil { + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated", params.name} + return + } + var result interface{} + if !t.isCompound && t.class == ClassUniversal { + innerBytes := bytes[offset : offset+t.length] + switch t.tag { + case TagPrintableString: + result, err = parsePrintableString(innerBytes, params.lax, params.name) + case TagNumericString: + result, err = parseNumericString(innerBytes, params.name) + case TagIA5String: + result, err = parseIA5String(innerBytes, params.name) + case TagT61String: + result, err = parseT61String(innerBytes) + case TagUTF8String: + result, err = parseUTF8String(innerBytes) + case TagInteger: + result, err = parseInt64(innerBytes, params.lax, params.name) + case TagBitString: + result, err = parseBitString(innerBytes, params.name) + case TagOID: + result, err = parseObjectIdentifier(innerBytes, params.lax, params.name) + case TagUTCTime: + result, err = parseUTCTime(innerBytes) + case TagGeneralizedTime: + result, err = parseGeneralizedTime(innerBytes) + case TagOctetString: + result = innerBytes + case TagBMPString: + result, err = parseBMPString(innerBytes) + default: + // If we don't know how to handle the type, we just leave Value as nil. 
+ } + } + offset += t.length + if err != nil { + return + } + if result != nil { + v.Set(reflect.ValueOf(result)) + } + return + } + + t, offset, err := parseTagAndLength(bytes, offset, params.name) + if err != nil { + return + } + if params.explicit { + expectedClass := ClassContextSpecific + if params.application { + expectedClass = ClassApplication + } + if offset == len(bytes) { + err = StructuralError{"explicit tag has no child", params.name} + return + } + if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) { + if fieldType == rawValueType { + // The inner element should not be parsed for RawValues. + } else if t.length > 0 { + t, offset, err = parseTagAndLength(bytes, offset, params.name) + if err != nil { + return + } + } else { + if fieldType != flagType { + err = StructuralError{"zero length explicit tag was not an asn1.Flag", params.name} + return + } + v.SetBool(true) + return + } + } else { + // The tags didn't match, it might be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{"explicitly tagged member didn't match", params.name} + } + return + } + } + + matchAny, universalTag, compoundType, ok1 := getUniversalType(fieldType) + if !ok1 { + err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType), params.name} + return + } + + // Special case for strings: all the ASN.1 string types map to the Go + // type string. getUniversalType returns the tag for PrintableString + // when it sees a string, so if we see a different string type on the + // wire, we change the universal type to match. + if universalTag == TagPrintableString { + if t.class == ClassUniversal { + switch t.tag { + case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString: + universalTag = t.tag + } + } else if params.stringType != 0 { + universalTag = params.stringType + } + } + + // Special case for time: UTCTime and GeneralizedTime both map to the + // Go type time.Time. + if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal { + universalTag = TagGeneralizedTime + } + + if params.set { + universalTag = TagSet + } + + matchAnyClassAndTag := matchAny + expectedClass := ClassUniversal + expectedTag := universalTag + + if !params.explicit && params.tag != nil { + expectedClass = ClassContextSpecific + expectedTag = *params.tag + matchAnyClassAndTag = false + } + + if !params.explicit && params.application && params.tag != nil { + expectedClass = ClassApplication + expectedTag = *params.tag + matchAnyClassAndTag = false + } + + if !params.explicit && params.private && params.tag != nil { + expectedClass = ClassPrivate + expectedTag = *params.tag + matchAnyClassAndTag = false + } + + // We have unwrapped any explicit tagging at this point. + if !matchAnyClassAndTag && (t.class != expectedClass || t.tag != expectedTag) || + (!matchAny && t.isCompound != compoundType) { + // Tags don't match. Again, it could be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset), params.name} + } + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated", params.name} + return + } + innerBytes := bytes[offset : offset+t.length] + offset += t.length + + // We deal with the structures defined in this package first. 
+ switch fieldType { + case rawValueType: + result := RawValue{t.class, t.tag, t.isCompound, innerBytes, bytes[initOffset:offset]} + v.Set(reflect.ValueOf(result)) + return + case objectIdentifierType: + newSlice, err1 := parseObjectIdentifier(innerBytes, params.lax, params.name) + v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice))) + if err1 == nil { + reflect.Copy(v, reflect.ValueOf(newSlice)) + } + err = err1 + return + case bitStringType: + bs, err1 := parseBitString(innerBytes, params.name) + if err1 == nil { + v.Set(reflect.ValueOf(bs)) + } + err = err1 + return + case timeType: + var time time.Time + var err1 error + if universalTag == TagUTCTime { + time, err1 = parseUTCTime(innerBytes) + } else { + time, err1 = parseGeneralizedTime(innerBytes) + } + if err1 == nil { + v.Set(reflect.ValueOf(time)) + } + err = err1 + return + case enumeratedType: + parsedInt, err1 := parseInt32(innerBytes, params.lax, params.name) + if err1 == nil { + v.SetInt(int64(parsedInt)) + } + err = err1 + return + case flagType: + v.SetBool(true) + return + case bigIntType: + parsedInt, err1 := parseBigInt(innerBytes, params.lax, params.name) + if err1 == nil { + v.Set(reflect.ValueOf(parsedInt)) + } + err = err1 + return + } + switch val := v; val.Kind() { + case reflect.Bool: + parsedBool, err1 := parseBool(innerBytes, params.name) + if err1 == nil { + val.SetBool(parsedBool) + } + err = err1 + return + case reflect.Int, reflect.Int32, reflect.Int64: + if val.Type().Size() == 4 { + parsedInt, err1 := parseInt32(innerBytes, params.lax, params.name) + if err1 == nil { + val.SetInt(int64(parsedInt)) + } + err = err1 + } else { + parsedInt, err1 := parseInt64(innerBytes, params.lax, params.name) + if err1 == nil { + val.SetInt(parsedInt) + } + err = err1 + } + return + // TODO(dfc) Add support for the remaining integer types + case reflect.Struct: + structType := fieldType + + for i := 0; i < structType.NumField(); i++ { + if structType.Field(i).PkgPath != "" { + err = StructuralError{"struct contains unexported fields", structType.Field(i).Name} + return + } + } + + if structType.NumField() > 0 && + structType.Field(0).Type == rawContentsType { + bytes := bytes[initOffset:offset] + val.Field(0).Set(reflect.ValueOf(RawContent(bytes))) + } + + innerOffset := 0 + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + if i == 0 && field.Type == rawContentsType { + continue + } + innerParams := parseFieldParameters(field.Tag.Get("asn1")) + innerParams.name = field.Name + innerParams.lax = params.lax + innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, innerParams) + if err != nil { + return + } + } + // We allow extra bytes at the end of the SEQUENCE because + // adding elements to the end has been used in X.509 as the + // version numbers have increased. 
+ return + case reflect.Slice: + sliceType := fieldType + if sliceType.Elem().Kind() == reflect.Uint8 { + val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes))) + reflect.Copy(val, reflect.ValueOf(innerBytes)) + return + } + newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem(), params.lax, params.name) + if err1 == nil { + val.Set(newSlice) + } + err = err1 + return + case reflect.String: + var v string + switch universalTag { + case TagPrintableString: + v, err = parsePrintableString(innerBytes, params.lax, params.name) + case TagNumericString: + v, err = parseNumericString(innerBytes, params.name) + case TagIA5String: + v, err = parseIA5String(innerBytes, params.name) + case TagT61String: + v, err = parseT61String(innerBytes) + case TagUTF8String: + v, err = parseUTF8String(innerBytes) + case TagGeneralString: + // GeneralString is specified in ISO-2022/ECMA-35, + // A brief review suggests that it includes structures + // that allow the encoding to change midstring and + // such. We give up and pass it as an 8-bit string. + v, err = parseT61String(innerBytes) + case TagBMPString: + v, err = parseBMPString(innerBytes) + + default: + err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag), params.name} + } + if err == nil { + val.SetString(v) + } + return + } + err = StructuralError{"unsupported: " + v.Type().String(), params.name} + return +} + +// canHaveDefaultValue reports whether k is a Kind that we will set a default +// value for. (A signed integer, essentially.) +func canHaveDefaultValue(k reflect.Kind) bool { + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + } + + return false +} + +// setDefaultValue is used to install a default value, from a tag string, into +// a Value. It is successful if the field was optional, even if a default value +// wasn't provided or it failed to install it into the Value. +func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) { + if !params.optional { + return + } + ok = true + if params.defaultValue == nil { + return + } + if canHaveDefaultValue(v.Kind()) { + v.SetInt(*params.defaultValue) + } + return +} + +// Unmarshal parses the DER-encoded ASN.1 data structure b +// and uses the reflect package to fill in an arbitrary value pointed at by val. +// Because Unmarshal uses the reflect package, the structs +// being written to must use upper case field names. +// +// An ASN.1 INTEGER can be written to an int, int32, int64, +// or *big.Int (from the math/big package). +// If the encoded value does not fit in the Go type, +// Unmarshal returns a parse error. +// +// An ASN.1 BIT STRING can be written to a BitString. +// +// An ASN.1 OCTET STRING can be written to a []byte. +// +// An ASN.1 OBJECT IDENTIFIER can be written to an +// ObjectIdentifier. +// +// An ASN.1 ENUMERATED can be written to an Enumerated. +// +// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time. +// +// An ASN.1 PrintableString, IA5String, or NumericString can be written to a string. +// +// Any of the above ASN.1 values can be written to an interface{}. +// The value stored in the interface has the corresponding Go type. +// For integers, that type is int64. +// +// An ASN.1 SEQUENCE OF x or SET OF x can be written +// to a slice if an x can be written to the slice's element type. 
+// +// An ASN.1 SEQUENCE or SET can be written to a struct +// if each of the elements in the sequence can be +// written to the corresponding element in the struct. +// +// The following tags on struct fields have special meaning to Unmarshal: +// +// application specifies that an APPLICATION tag is used +// private specifies that a PRIVATE tag is used +// default:x sets the default value for optional integer fields (only used if optional is also present) +// explicit specifies that an additional, explicit tag wraps the implicit one +// optional marks the field as ASN.1 OPTIONAL +// set causes a SET, rather than a SEQUENCE type to be expected +// tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC +// lax relax strict encoding checks for this field, and for any fields within it +// +// If the type of the first field of a structure is RawContent then the raw +// ASN1 contents of the struct will be stored in it. +// +// If the type name of a slice element ends with "SET" then it's treated as if +// the "set" tag was set on it. This can be used with nested slices where a +// struct tag cannot be given. +// +// Other ASN.1 types are not supported; if it encounters them, +// Unmarshal returns a parse error. +func Unmarshal(b []byte, val interface{}) (rest []byte, err error) { + return UnmarshalWithParams(b, val, "") +} + +// UnmarshalWithParams allows field parameters to be specified for the +// top-level element. The form of the params is the same as the field tags. +func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) { + v := reflect.ValueOf(val).Elem() + offset, err := parseField(v, b, 0, parseFieldParameters(params)) + if err != nil { + return nil, err + } + return b[offset:], nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/common.go b/vendor/github.com/google/certificate-transparency-go/asn1/common.go new file mode 100644 index 00000000000..982d06c09ed --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/asn1/common.go @@ -0,0 +1,187 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "reflect" + "strconv" + "strings" +) + +// ASN.1 objects have metadata preceding them: +// the tag: the type of the object +// a flag denoting if this object is compound or not +// the class type: the namespace of the tag +// the length of the object, in bytes + +// Here are some standard tags and classes + +// ASN.1 tags represent the type of the following object. +const ( + TagBoolean = 1 + TagInteger = 2 + TagBitString = 3 + TagOctetString = 4 + TagNull = 5 + TagOID = 6 + TagEnum = 10 + TagUTF8String = 12 + TagSequence = 16 + TagSet = 17 + TagNumericString = 18 + TagPrintableString = 19 + TagT61String = 20 + TagIA5String = 22 + TagUTCTime = 23 + TagGeneralizedTime = 24 + TagGeneralString = 27 + TagBMPString = 30 +) + +// ASN.1 class types represent the namespace of the tag. +const ( + ClassUniversal = 0 + ClassApplication = 1 + ClassContextSpecific = 2 + ClassPrivate = 3 +) + +type tagAndLength struct { + class, tag, length int + isCompound bool +} + +// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead +// of" and "in addition to". When not specified, every primitive type has a +// default tag in the UNIVERSAL class. 
+// +// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1 +// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT +// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another. +// +// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an +// /additional/ tag would wrap the default tag. This explicit tag will have the +// compound flag set. +// +// (This is used in order to remove ambiguity with optional elements.) +// +// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we +// don't support that here. We support a single layer of EXPLICIT or IMPLICIT +// tagging with tag strings on the fields of a structure. + +// fieldParameters is the parsed representation of tag string from a structure field. +type fieldParameters struct { + optional bool // true iff the field is OPTIONAL + explicit bool // true iff an EXPLICIT tag is in use. + application bool // true iff an APPLICATION tag is in use. + private bool // true iff a PRIVATE tag is in use. + defaultValue *int64 // a default value for INTEGER typed fields (maybe nil). + tag *int // the EXPLICIT or IMPLICIT tag (maybe nil). + stringType int // the string tag to use when marshaling. + timeType int // the time tag to use when marshaling. + set bool // true iff this should be encoded as a SET + omitEmpty bool // true iff this should be omitted if empty when marshaling. + lax bool // true iff unmarshalling should skip some error checks + name string // name of field for better diagnostics + + // Invariants: + // if explicit is set, tag is non-nil. +} + +// Given a tag string with the format specified in the package comment, +// parseFieldParameters will parse it into a fieldParameters structure, +// ignoring unknown parts of the string. +func parseFieldParameters(str string) (ret fieldParameters) { + for _, part := range strings.Split(str, ",") { + switch { + case part == "optional": + ret.optional = true + case part == "explicit": + ret.explicit = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "generalized": + ret.timeType = TagGeneralizedTime + case part == "utc": + ret.timeType = TagUTCTime + case part == "ia5": + ret.stringType = TagIA5String + case part == "printable": + ret.stringType = TagPrintableString + case part == "numeric": + ret.stringType = TagNumericString + case part == "utf8": + ret.stringType = TagUTF8String + case strings.HasPrefix(part, "default:"): + i, err := strconv.ParseInt(part[8:], 10, 64) + if err == nil { + ret.defaultValue = new(int64) + *ret.defaultValue = i + } + case strings.HasPrefix(part, "tag:"): + i, err := strconv.Atoi(part[4:]) + if err == nil { + ret.tag = new(int) + *ret.tag = i + } + case part == "set": + ret.set = true + case part == "application": + ret.application = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "private": + ret.private = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "omitempty": + ret.omitEmpty = true + case part == "lax": + ret.lax = true + } + } + return +} + +// Given a reflected Go type, getUniversalType returns the default tag number +// and expected compound flag. 
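+//
+// A few illustrative cases:
+//
+//	getUniversalType(reflect.TypeOf(""))          // false, TagPrintableString, false, true
+//	getUniversalType(reflect.TypeOf([]byte(nil))) // false, TagOctetString, false, true
+//	getUniversalType(reflect.TypeOf(RawValue{}))  // true, -1, false, true (matches any tag)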
+func getUniversalType(t reflect.Type) (matchAny bool, tagNumber int, isCompound, ok bool) { + switch t { + case rawValueType: + return true, -1, false, true + case objectIdentifierType: + return false, TagOID, false, true + case bitStringType: + return false, TagBitString, false, true + case timeType: + return false, TagUTCTime, false, true + case enumeratedType: + return false, TagEnum, false, true + case bigIntType: + return false, TagInteger, false, true + } + switch t.Kind() { + case reflect.Bool: + return false, TagBoolean, false, true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return false, TagInteger, false, true + case reflect.Struct: + return false, TagSequence, true, true + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return false, TagOctetString, false, true + } + if strings.HasSuffix(t.Name(), "SET") { + return false, TagSet, true, true + } + return false, TagSequence, true, true + case reflect.String: + return false, TagPrintableString, false, true + } + return false, 0, false, false +} diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go b/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go new file mode 100644 index 00000000000..9801b065a18 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go @@ -0,0 +1,691 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "errors" + "fmt" + "math/big" + "reflect" + "time" + "unicode/utf8" +) + +var ( + byte00Encoder encoder = byteEncoder(0x00) + byteFFEncoder encoder = byteEncoder(0xff) +) + +// encoder represents an ASN.1 element that is waiting to be marshaled. +type encoder interface { + // Len returns the number of bytes needed to marshal this element. + Len() int + // Encode encodes this element by writing Len() bytes to dst. + Encode(dst []byte) +} + +type byteEncoder byte + +func (c byteEncoder) Len() int { + return 1 +} + +func (c byteEncoder) Encode(dst []byte) { + dst[0] = byte(c) +} + +type bytesEncoder []byte + +func (b bytesEncoder) Len() int { + return len(b) +} + +func (b bytesEncoder) Encode(dst []byte) { + if copy(dst, b) != len(b) { + panic("internal error") + } +} + +type stringEncoder string + +func (s stringEncoder) Len() int { + return len(s) +} + +func (s stringEncoder) Encode(dst []byte) { + if copy(dst, s) != len(s) { + panic("internal error") + } +} + +type multiEncoder []encoder + +func (m multiEncoder) Len() int { + var size int + for _, e := range m { + size += e.Len() + } + return size +} + +func (m multiEncoder) Encode(dst []byte) { + var off int + for _, e := range m { + e.Encode(dst[off:]) + off += e.Len() + } +} + +type taggedEncoder struct { + // scratch contains temporary space for encoding the tag and length of + // an element in order to avoid extra allocations. 
+	scratch [8]byte
+	tag     encoder
+	body    encoder
+}
+
+func (t *taggedEncoder) Len() int {
+	return t.tag.Len() + t.body.Len()
+}
+
+func (t *taggedEncoder) Encode(dst []byte) {
+	t.tag.Encode(dst)
+	t.body.Encode(dst[t.tag.Len():])
+}
+
+type int64Encoder int64
+
+func (i int64Encoder) Len() int {
+	n := 1
+
+	for i > 127 {
+		n++
+		i >>= 8
+	}
+
+	for i < -128 {
+		n++
+		i >>= 8
+	}
+
+	return n
+}
+
+func (i int64Encoder) Encode(dst []byte) {
+	n := i.Len()
+
+	for j := 0; j < n; j++ {
+		dst[j] = byte(i >> uint((n-1-j)*8))
+	}
+}
+
+func base128IntLength(n int64) int {
+	if n == 0 {
+		return 1
+	}
+
+	l := 0
+	for i := n; i > 0; i >>= 7 {
+		l++
+	}
+
+	return l
+}
+
+func appendBase128Int(dst []byte, n int64) []byte {
+	l := base128IntLength(n)
+
+	for i := l - 1; i >= 0; i-- {
+		o := byte(n >> uint(i*7))
+		o &= 0x7f
+		if i != 0 {
+			o |= 0x80
+		}
+
+		dst = append(dst, o)
+	}
+
+	return dst
+}
+
+func makeBigInt(n *big.Int, fieldName string) (encoder, error) {
+	if n == nil {
+		return nil, StructuralError{"empty integer", fieldName}
+	}
+
+	if n.Sign() < 0 {
+		// A negative number has to be converted to two's-complement
+		// form. So we'll invert and subtract 1. If the
+		// most-significant-bit isn't set then we'll need to pad the
+		// beginning with 0xff in order to keep the number negative.
+		nMinus1 := new(big.Int).Neg(n)
+		nMinus1.Sub(nMinus1, bigOne)
+		bytes := nMinus1.Bytes()
+		for i := range bytes {
+			bytes[i] ^= 0xff
+		}
+		if len(bytes) == 0 || bytes[0]&0x80 == 0 {
+			return multiEncoder([]encoder{byteFFEncoder, bytesEncoder(bytes)}), nil
+		}
+		return bytesEncoder(bytes), nil
+	} else if n.Sign() == 0 {
+		// Zero is written as a single zero byte rather than no bytes.
+		return byte00Encoder, nil
+	} else {
+		bytes := n.Bytes()
+		if len(bytes) > 0 && bytes[0]&0x80 != 0 {
+			// We'll have to pad this with 0x00 in order to stop it
+			// looking like a negative number.
+			return multiEncoder([]encoder{byte00Encoder, bytesEncoder(bytes)}), nil
+		}
+		return bytesEncoder(bytes), nil
+	}
+}
+
+func appendLength(dst []byte, i int) []byte {
+	n := lengthLength(i)
+
+	for ; n > 0; n-- {
+		dst = append(dst, byte(i>>uint((n-1)*8)))
+	}
+
+	return dst
+}
+
+func lengthLength(i int) (numBytes int) {
+	numBytes = 1
+	for i > 255 {
+		numBytes++
+		i >>= 8
+	}
+	return
+}
+
+func appendTagAndLength(dst []byte, t tagAndLength) []byte {
+	b := uint8(t.class) << 6
+	if t.isCompound {
+		b |= 0x20
+	}
+	if t.tag >= 31 {
+		b |= 0x1f
+		dst = append(dst, b)
+		dst = appendBase128Int(dst, int64(t.tag))
+	} else {
+		b |= uint8(t.tag)
+		dst = append(dst, b)
+	}
+
+	if t.length >= 128 {
+		l := lengthLength(t.length)
+		dst = append(dst, 0x80|byte(l))
+		dst = appendLength(dst, t.length)
+	} else {
+		dst = append(dst, byte(t.length))
+	}
+
+	return dst
+}
+
+type bitStringEncoder BitString
+
+func (b bitStringEncoder) Len() int {
+	return len(b.Bytes) + 1
+}
+
+func (b bitStringEncoder) Encode(dst []byte) {
+	dst[0] = byte((8 - b.BitLength%8) % 8)
+	if copy(dst[1:], b.Bytes) != len(b.Bytes) {
+		panic("internal error")
+	}
+}
+
+type oidEncoder []int
+
+func (oid oidEncoder) Len() int {
+	l := base128IntLength(int64(oid[0]*40 + oid[1]))
+	for i := 2; i < len(oid); i++ {
+		l += base128IntLength(int64(oid[i]))
+	}
+	return l
+}
+
+func (oid oidEncoder) Encode(dst []byte) {
+	dst = appendBase128Int(dst[:0], int64(oid[0]*40+oid[1]))
+	for i := 2; i < len(oid); i++ {
+		dst = appendBase128Int(dst, int64(oid[i]))
+	}
+}
+
+func makeObjectIdentifier(oid []int, fieldName string) (e encoder, err error) {
+	if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) {
+		return nil, StructuralError{"invalid object identifier", fieldName}
+	}
+
+	return oidEncoder(oid), nil
+}
+
+func makePrintableString(s, fieldName string) (e encoder, err error) {
+	for i := 0; i < len(s); i++ {
+		// The asterisk is often used in PrintableString, even though
+		// it is invalid. If a PrintableString was specifically
+		// requested then the asterisk is permitted by this code.
+		// Ampersand is allowed in parsing due to a handful of CA
+		// certificates; however, when making new certificates
+		// it is rejected.
+		if !isPrintable(s[i], allowAsterisk, rejectAmpersand) {
+			return nil, StructuralError{"PrintableString contains invalid character", fieldName}
+		}
+	}
+
+	return stringEncoder(s), nil
+}
+
+func makeIA5String(s, fieldName string) (e encoder, err error) {
+	for i := 0; i < len(s); i++ {
+		if s[i] > 127 {
+			return nil, StructuralError{"IA5String contains invalid character", fieldName}
+		}
+	}
+
+	return stringEncoder(s), nil
+}
+
+func makeNumericString(s string, fieldName string) (e encoder, err error) {
+	for i := 0; i < len(s); i++ {
+		if !isNumeric(s[i]) {
+			return nil, StructuralError{"NumericString contains invalid character", fieldName}
+		}
+	}
+
+	return stringEncoder(s), nil
+}
+
+func makeUTF8String(s string) encoder {
+	return stringEncoder(s)
+}
+
+func appendTwoDigits(dst []byte, v int) []byte {
+	return append(dst, byte('0'+(v/10)%10), byte('0'+v%10))
+}
+
+func appendFourDigits(dst []byte, v int) []byte {
+	var bytes [4]byte
+	for i := range bytes {
+		bytes[3-i] = '0' + byte(v%10)
+		v /= 10
+	}
+	return append(dst, bytes[:]...)
+} + +func outsideUTCRange(t time.Time) bool { + year := t.Year() + return year < 1950 || year >= 2050 +} + +func makeUTCTime(t time.Time, fieldName string) (e encoder, err error) { + dst := make([]byte, 0, 18) + + dst, err = appendUTCTime(dst, t, fieldName) + if err != nil { + return nil, err + } + + return bytesEncoder(dst), nil +} + +func makeGeneralizedTime(t time.Time, fieldName string) (e encoder, err error) { + dst := make([]byte, 0, 20) + + dst, err = appendGeneralizedTime(dst, t, fieldName) + if err != nil { + return nil, err + } + + return bytesEncoder(dst), nil +} + +func appendUTCTime(dst []byte, t time.Time, fieldName string) (ret []byte, err error) { + year := t.Year() + + switch { + case 1950 <= year && year < 2000: + dst = appendTwoDigits(dst, year-1900) + case 2000 <= year && year < 2050: + dst = appendTwoDigits(dst, year-2000) + default: + return nil, StructuralError{"cannot represent time as UTCTime", fieldName} + } + + return appendTimeCommon(dst, t), nil +} + +func appendGeneralizedTime(dst []byte, t time.Time, fieldName string) (ret []byte, err error) { + year := t.Year() + if year < 0 || year > 9999 { + return nil, StructuralError{"cannot represent time as GeneralizedTime", fieldName} + } + + dst = appendFourDigits(dst, year) + + return appendTimeCommon(dst, t), nil +} + +func appendTimeCommon(dst []byte, t time.Time) []byte { + _, month, day := t.Date() + + dst = appendTwoDigits(dst, int(month)) + dst = appendTwoDigits(dst, day) + + hour, min, sec := t.Clock() + + dst = appendTwoDigits(dst, hour) + dst = appendTwoDigits(dst, min) + dst = appendTwoDigits(dst, sec) + + _, offset := t.Zone() + + switch { + case offset/60 == 0: + return append(dst, 'Z') + case offset > 0: + dst = append(dst, '+') + case offset < 0: + dst = append(dst, '-') + } + + offsetMinutes := offset / 60 + if offsetMinutes < 0 { + offsetMinutes = -offsetMinutes + } + + dst = appendTwoDigits(dst, offsetMinutes/60) + dst = appendTwoDigits(dst, offsetMinutes%60) + + return dst +} + +func stripTagAndLength(in []byte) []byte { + _, offset, err := parseTagAndLength(in, 0, "") + if err != nil { + return in + } + return in[offset:] +} + +func makeBody(value reflect.Value, params fieldParameters) (e encoder, err error) { + switch value.Type() { + case flagType: + return bytesEncoder(nil), nil + case timeType: + t := value.Interface().(time.Time) + if params.timeType == TagGeneralizedTime || outsideUTCRange(t) { + return makeGeneralizedTime(t, params.name) + } + return makeUTCTime(t, params.name) + case bitStringType: + return bitStringEncoder(value.Interface().(BitString)), nil + case objectIdentifierType: + return makeObjectIdentifier(value.Interface().(ObjectIdentifier), params.name) + case bigIntType: + return makeBigInt(value.Interface().(*big.Int), params.name) + } + + switch v := value; v.Kind() { + case reflect.Bool: + if v.Bool() { + return byteFFEncoder, nil + } + return byte00Encoder, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64Encoder(v.Int()), nil + case reflect.Struct: + t := v.Type() + + for i := 0; i < t.NumField(); i++ { + if t.Field(i).PkgPath != "" { + return nil, StructuralError{"struct contains unexported fields", t.Field(i).Name} + } + } + + startingField := 0 + + n := t.NumField() + if n == 0 { + return bytesEncoder(nil), nil + } + + // If the first element of the structure is a non-empty + // RawContents, then we don't bother serializing the rest. 
+ if t.Field(0).Type == rawContentsType { + s := v.Field(0) + if s.Len() > 0 { + bytes := s.Bytes() + /* The RawContents will contain the tag and + * length fields but we'll also be writing + * those ourselves, so we strip them out of + * bytes */ + return bytesEncoder(stripTagAndLength(bytes)), nil + } + + startingField = 1 + } + + switch n1 := n - startingField; n1 { + case 0: + return bytesEncoder(nil), nil + case 1: + return makeField(v.Field(startingField), parseFieldParameters(t.Field(startingField).Tag.Get("asn1"))) + default: + m := make([]encoder, n1) + for i := 0; i < n1; i++ { + m[i], err = makeField(v.Field(i+startingField), parseFieldParameters(t.Field(i+startingField).Tag.Get("asn1"))) + if err != nil { + return nil, err + } + } + + return multiEncoder(m), nil + } + case reflect.Slice: + sliceType := v.Type() + if sliceType.Elem().Kind() == reflect.Uint8 { + return bytesEncoder(v.Bytes()), nil + } + + var fp fieldParameters + + switch l := v.Len(); l { + case 0: + return bytesEncoder(nil), nil + case 1: + return makeField(v.Index(0), fp) + default: + m := make([]encoder, l) + + for i := 0; i < l; i++ { + m[i], err = makeField(v.Index(i), fp) + if err != nil { + return nil, err + } + } + + return multiEncoder(m), nil + } + case reflect.String: + switch params.stringType { + case TagIA5String: + return makeIA5String(v.String(), params.name) + case TagPrintableString: + return makePrintableString(v.String(), params.name) + case TagNumericString: + return makeNumericString(v.String(), params.name) + default: + return makeUTF8String(v.String()), nil + } + } + + return nil, StructuralError{"unknown Go type", params.name} +} + +func makeField(v reflect.Value, params fieldParameters) (e encoder, err error) { + if !v.IsValid() { + return nil, fmt.Errorf("asn1: cannot marshal nil value") + } + // If the field is an interface{} then recurse into it. + if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 { + return makeField(v.Elem(), params) + } + + if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty { + return bytesEncoder(nil), nil + } + + if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) { + defaultValue := reflect.New(v.Type()).Elem() + defaultValue.SetInt(*params.defaultValue) + + if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) { + return bytesEncoder(nil), nil + } + } + + // If no default value is given then the zero value for the type is + // assumed to be the default value. This isn't obviously the correct + // behavior, but it's what Go has traditionally done. 
+ if params.optional && params.defaultValue == nil { + if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) { + return bytesEncoder(nil), nil + } + } + + if v.Type() == rawValueType { + rv := v.Interface().(RawValue) + if len(rv.FullBytes) != 0 { + return bytesEncoder(rv.FullBytes), nil + } + + t := new(taggedEncoder) + + t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound})) + t.body = bytesEncoder(rv.Bytes) + + return t, nil + } + + matchAny, tag, isCompound, ok := getUniversalType(v.Type()) + if !ok || matchAny { + return nil, StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type()), params.name} + } + + if params.timeType != 0 && tag != TagUTCTime { + return nil, StructuralError{"explicit time type given to non-time member", params.name} + } + + if params.stringType != 0 && tag != TagPrintableString { + return nil, StructuralError{"explicit string type given to non-string member", params.name} + } + + switch tag { + case TagPrintableString: + if params.stringType == 0 { + // This is a string without an explicit string type. We'll use + // a PrintableString if the character set in the string is + // sufficiently limited, otherwise we'll use a UTF8String. + for _, r := range v.String() { + if r >= utf8.RuneSelf || !isPrintable(byte(r), rejectAsterisk, rejectAmpersand) { + if !utf8.ValidString(v.String()) { + return nil, errors.New("asn1: string not valid UTF-8") + } + tag = TagUTF8String + break + } + } + } else { + tag = params.stringType + } + case TagUTCTime: + if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) { + tag = TagGeneralizedTime + } + } + + if params.set { + if tag != TagSequence { + return nil, StructuralError{"non sequence tagged as set", params.name} + } + tag = TagSet + } + + t := new(taggedEncoder) + + t.body, err = makeBody(v, params) + if err != nil { + return nil, err + } + + bodyLen := t.body.Len() + + class := ClassUniversal + if params.tag != nil { + if params.application { + class = ClassApplication + } else if params.private { + class = ClassPrivate + } else { + class = ClassContextSpecific + } + + if params.explicit { + t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{ClassUniversal, tag, bodyLen, isCompound})) + + tt := new(taggedEncoder) + + tt.body = t + + tt.tag = bytesEncoder(appendTagAndLength(tt.scratch[:0], tagAndLength{ + class: class, + tag: *params.tag, + length: bodyLen + t.tag.Len(), + isCompound: true, + })) + + return tt, nil + } + + // implicit tag. + tag = *params.tag + } + + t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{class, tag, bodyLen, isCompound})) + + return t, nil +} + +// Marshal returns the ASN.1 encoding of val. +// +// In addition to the struct tags recognised by Unmarshal, the following can be +// used: +// +// ia5: causes strings to be marshaled as ASN.1, IA5String values +// omitempty: causes empty slices to be skipped +// printable: causes strings to be marshaled as ASN.1, PrintableString values +// utf8: causes strings to be marshaled as ASN.1, UTF8String values +// utc: causes time.Time to be marshaled as ASN.1, UTCTime values +// generalized: causes time.Time to be marshaled as ASN.1, GeneralizedTime values +func Marshal(val interface{}) ([]byte, error) { + return MarshalWithParams(val, "") +} + +// MarshalWithParams allows field parameters to be specified for the +// top-level element. The form of the params is the same as the field tags. 
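+//
+// For example, wrapping a string in an explicit context-specific tag
+// (illustrative sketch):
+//
+//	b, err := MarshalWithParams("hi", "explicit,tag:0")
+//	// b == []byte{0xa0, 0x04, 0x13, 0x02, 'h', 'i'}: an [0] EXPLICIT
+//	// wrapper around a PrintableString; err == nil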
+func MarshalWithParams(val interface{}, params string) ([]byte, error) { + e, err := makeField(reflect.ValueOf(val), parseFieldParameters(params)) + if err != nil { + return nil, err + } + b := make([]byte, e.Len()) + e.Encode(b) + return b, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go new file mode 100644 index 00000000000..a2fed51d883 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go @@ -0,0 +1,278 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.1 +// protoc v3.20.1 +// source: client/configpb/multilog.proto + +package configpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// TemporalLogConfig is a set of LogShardConfig messages, whose +// time limits should be contiguous. +type TemporalLogConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"` +} + +func (x *TemporalLogConfig) Reset() { + *x = TemporalLogConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_client_configpb_multilog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TemporalLogConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TemporalLogConfig) ProtoMessage() {} + +func (x *TemporalLogConfig) ProtoReflect() protoreflect.Message { + mi := &file_client_configpb_multilog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TemporalLogConfig.ProtoReflect.Descriptor instead. +func (*TemporalLogConfig) Descriptor() ([]byte, []int) { + return file_client_configpb_multilog_proto_rawDescGZIP(), []int{0} +} + +func (x *TemporalLogConfig) GetShard() []*LogShardConfig { + if x != nil { + return x.Shard + } + return nil +} + +// LogShardConfig describes the acceptable date range for a single shard of a temporal +// log. 
+type LogShardConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` + // The log's public key in DER-encoded PKIX form. + PublicKeyDer []byte `protobuf:"bytes,2,opt,name=public_key_der,json=publicKeyDer,proto3" json:"public_key_der,omitempty"` + // not_after_start defines the start of the range of acceptable NotAfter + // values, inclusive. + // Leaving this unset implies no lower bound to the range. + NotAfterStart *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart,proto3" json:"not_after_start,omitempty"` + // not_after_limit defines the end of the range of acceptable NotAfter values, + // exclusive. + // Leaving this unset implies no upper bound to the range. + NotAfterLimit *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit,proto3" json:"not_after_limit,omitempty"` +} + +func (x *LogShardConfig) Reset() { + *x = LogShardConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_client_configpb_multilog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogShardConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogShardConfig) ProtoMessage() {} + +func (x *LogShardConfig) ProtoReflect() protoreflect.Message { + mi := &file_client_configpb_multilog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogShardConfig.ProtoReflect.Descriptor instead. +func (*LogShardConfig) Descriptor() ([]byte, []int) { + return file_client_configpb_multilog_proto_rawDescGZIP(), []int{1} +} + +func (x *LogShardConfig) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (x *LogShardConfig) GetPublicKeyDer() []byte { + if x != nil { + return x.PublicKeyDer + } + return nil +} + +func (x *LogShardConfig) GetNotAfterStart() *timestamppb.Timestamp { + if x != nil { + return x.NotAfterStart + } + return nil +} + +func (x *LogShardConfig) GetNotAfterLimit() *timestamppb.Timestamp { + if x != nil { + return x.NotAfterLimit + } + return nil +} + +var File_client_configpb_multilog_proto protoreflect.FileDescriptor + +var file_client_configpb_multilog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, + 0x62, 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x43, 0x0a, 0x11, 0x54, + 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x2e, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x67, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x22, 0xd0, 0x01, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0f, 0x6e, + 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x42, 0x0a, 0x0f, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x2d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, + 0x79, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x6c, 0x6f, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_client_configpb_multilog_proto_rawDescOnce sync.Once + file_client_configpb_multilog_proto_rawDescData = file_client_configpb_multilog_proto_rawDesc +) + +func file_client_configpb_multilog_proto_rawDescGZIP() []byte { + file_client_configpb_multilog_proto_rawDescOnce.Do(func() { + file_client_configpb_multilog_proto_rawDescData = protoimpl.X.CompressGZIP(file_client_configpb_multilog_proto_rawDescData) + }) + return file_client_configpb_multilog_proto_rawDescData +} + +var file_client_configpb_multilog_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_client_configpb_multilog_proto_goTypes = []interface{}{ + (*TemporalLogConfig)(nil), // 0: configpb.TemporalLogConfig + (*LogShardConfig)(nil), // 1: configpb.LogShardConfig + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp +} +var file_client_configpb_multilog_proto_depIdxs = []int32{ + 1, // 0: configpb.TemporalLogConfig.shard:type_name -> configpb.LogShardConfig + 2, // 1: configpb.LogShardConfig.not_after_start:type_name -> google.protobuf.Timestamp + 2, // 2: configpb.LogShardConfig.not_after_limit:type_name -> google.protobuf.Timestamp + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_client_configpb_multilog_proto_init() } +func file_client_configpb_multilog_proto_init() { + if File_client_configpb_multilog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_client_configpb_multilog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TemporalLogConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_client_configpb_multilog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogShardConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_client_configpb_multilog_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_client_configpb_multilog_proto_goTypes, + DependencyIndexes: file_client_configpb_multilog_proto_depIdxs, + MessageInfos: file_client_configpb_multilog_proto_msgTypes, + }.Build() + File_client_configpb_multilog_proto = out.File + file_client_configpb_multilog_proto_rawDesc = nil + file_client_configpb_multilog_proto_goTypes = nil + file_client_configpb_multilog_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto new file mode 100644 index 00000000000..0774c35e210 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto @@ -0,0 +1,45 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package configpb; + +option go_package = "github.com/google/certificate-transparency-go/client/multilog/configpb"; + +import "google/protobuf/timestamp.proto"; + +// TemporalLogConfig is a set of LogShardConfig messages, whose +// time limits should be contiguous. +message TemporalLogConfig { + repeated LogShardConfig shard = 1; +} + +// LogShardConfig describes the acceptable date range for a single shard of a temporal +// log. +message LogShardConfig { + string uri = 1; + + // The log's public key in DER-encoded PKIX form. + bytes public_key_der = 2; + + // not_after_start defines the start of the range of acceptable NotAfter + // values, inclusive. + // Leaving this unset implies no lower bound to the range. + google.protobuf.Timestamp not_after_start = 3; + // not_after_limit defines the end of the range of acceptable NotAfter values, + // exclusive. + // Leaving this unset implies no upper bound to the range. + google.protobuf.Timestamp not_after_limit = 4; +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/getentries.go b/vendor/github.com/google/certificate-transparency-go/client/getentries.go new file mode 100644 index 00000000000..103dc815803 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/getentries.go @@ -0,0 +1,68 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "errors" + "strconv" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/x509" +) + +// GetRawEntries exposes the /ct/v1/get-entries result with only the JSON parsing done. +func (c *LogClient) GetRawEntries(ctx context.Context, start, end int64) (*ct.GetEntriesResponse, error) { + if end < 0 { + return nil, errors.New("end should be >= 0") + } + if end < start { + return nil, errors.New("start should be <= end") + } + + params := map[string]string{ + "start": strconv.FormatInt(start, 10), + "end": strconv.FormatInt(end, 10), + } + + var resp ct.GetEntriesResponse + if _, _, err := c.GetAndParse(ctx, ct.GetEntriesPath, params, &resp); err != nil { + return nil, err + } + + return &resp, nil +} + +// GetEntries attempts to retrieve the entries in the sequence [start, end] from the CT log server +// (RFC6962 s4.6) as parsed [pre-]certificates for convenience, held in a slice of ct.LogEntry structures. +// However, this does mean that any certificate parsing failures will cause a failure of the whole +// retrieval operation; for more robust retrieval of parsed certificates, use GetRawEntries() and invoke +// ct.LogEntryFromLeaf() on each individual entry. +func (c *LogClient) GetEntries(ctx context.Context, start, end int64) ([]ct.LogEntry, error) { + resp, err := c.GetRawEntries(ctx, start, end) + if err != nil { + return nil, err + } + entries := make([]ct.LogEntry, len(resp.Entries)) + for i, entry := range resp.Entries { + index := start + int64(i) + logEntry, err := ct.LogEntryFromLeaf(index, &entry) + if x509.IsFatal(err) { + return nil, err + } + entries[i] = *logEntry + } + return entries, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/logclient.go b/vendor/github.com/google/certificate-transparency-go/client/logclient.go new file mode 100644 index 00000000000..0e90c1077f4 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/logclient.go @@ -0,0 +1,227 @@ +// Copyright 2014 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package client is a CT log client implementation and contains types and code +// for interacting with RFC6962-compliant CT Log instances. 
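+// +// A minimal usage sketch (hedged, not upstream documentation: the log URL here is a placeholder and error handling is elided): +// +//	hc := &http.Client{Timeout: 30 * time.Second} +//	lc, _ := client.New("https://ct.example.com/testlog", hc, jsonclient.Options{}) // placeholder URL +//	sth, _ := lc.GetSTH(context.Background()) +//	fmt.Printf("tree size: %d\n", sth.TreeSize)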
+// See http://tools.ietf.org/html/rfc6962 for details +package client + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "strconv" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/jsonclient" + "github.com/google/certificate-transparency-go/tls" +) + +// LogClient represents a client for a given CT Log instance. +type LogClient struct { + jsonclient.JSONClient +} + +// CheckLogClient is an interface that allows (just) checking of various log contents. +type CheckLogClient interface { + BaseURI() string + GetSTH(context.Context) (*ct.SignedTreeHead, error) + GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error) + GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error) +} + +// New constructs a new LogClient instance. +// |uri| is the base URI of the CT log instance to interact with, e.g. +// https://ct.googleapis.com/pilot +// |hc| is the underlying client to be used for HTTP requests to the CT log. +// |opts| can be used to provide a custom logger interface and a public key +// for signature verification. +func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, error) { + logClient, err := jsonclient.New(uri, hc, opts) + if err != nil { + return nil, err + } + return &LogClient{*logClient}, err +} + +// RspError represents a server error including HTTP information. +type RspError = jsonclient.RspError + +// addChainWithRetry attempts to add |chain| to the log, using the API endpoint +// specified by |path|. If the provided context expires before submission is +// complete, an error will be returned. +func (c *LogClient) addChainWithRetry(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + var resp ct.AddChainResponse + var req ct.AddChainRequest + for _, link := range chain { + req.Chain = append(req.Chain, link.Data) + } + + httpRsp, body, err := c.PostAndParseWithRetry(ctx, path, &req, &resp) + if err != nil { + return nil, err + } + + var ds ct.DigitallySigned + if rest, err := tls.Unmarshal(resp.Signature, &ds); err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } else if len(rest) > 0 { + return nil, RspError{ + Err: fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)), + StatusCode: httpRsp.StatusCode, + Body: body, + } + } + + exts, err := base64.StdEncoding.DecodeString(resp.Extensions) + if err != nil { + return nil, RspError{ + Err: fmt.Errorf("invalid base64 data in Extensions (%q): %v", resp.Extensions, err), + StatusCode: httpRsp.StatusCode, + Body: body, + } + } + + var logID ct.LogID + copy(logID.KeyID[:], resp.ID) + sct := &ct.SignedCertificateTimestamp{ + SCTVersion: resp.SCTVersion, + LogID: logID, + Timestamp: resp.Timestamp, + Extensions: ct.CTExtensions(exts), + Signature: ds, + } + if err := c.VerifySCTSignature(*sct, ctype, chain); err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + return sct, nil +} + +// AddChain adds the (DER represented) X509 |chain| to the log. +func (c *LogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return c.addChainWithRetry(ctx, ct.X509LogEntryType, ct.AddChainPath, chain) +} + +// AddPreChain adds the (DER represented) Precertificate |chain| to the log.
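+// +// An illustrative sketch only (precertDER and issuerDER are assumed to hold DER-encoded certificates obtained elsewhere; error handling is elided): +// +//	chain := []ct.ASN1Cert{{Data: precertDER}, {Data: issuerDER}} // hypothetical DER inputs +//	sct, err := lc.AddPreChain(ctx, chain) +//	if err != nil { /* handle submission failure */ } +//	fmt.Printf("got SCT with timestamp %d\n", sct.Timestamp)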
+func (c *LogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return c.addChainWithRetry(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain) +} + +// GetSTH retrieves the current STH from the log. +// Returns a populated SignedTreeHead, or a non-nil error (which may be of type +// RspError if a raw http.Response is available). +func (c *LogClient) GetSTH(ctx context.Context) (*ct.SignedTreeHead, error) { + var resp ct.GetSTHResponse + httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHPath, nil, &resp) + if err != nil { + return nil, err + } + + sth, err := resp.ToSignedTreeHead() + if err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + + if err := c.VerifySTHSignature(*sth); err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + return sth, nil +} + +// VerifySTHSignature checks the signature in sth, returning any error encountered or nil if verification is +// successful. +func (c *LogClient) VerifySTHSignature(sth ct.SignedTreeHead) error { + if c.Verifier == nil { + // Can't verify signatures without a verifier + return nil + } + return c.Verifier.VerifySTHSignature(sth) +} + +// VerifySCTSignature checks the signature in sct for the given LogEntryType, with associated certificate chain. +func (c *LogClient) VerifySCTSignature(sct ct.SignedCertificateTimestamp, ctype ct.LogEntryType, certData []ct.ASN1Cert) error { + if c.Verifier == nil { + // Can't verify signatures without a verifier + return nil + } + leaf, err := ct.MerkleTreeLeafFromRawChain(certData, ctype, sct.Timestamp) + if err != nil { + return fmt.Errorf("failed to build MerkleTreeLeaf: %v", err) + } + leaf.TimestampedEntry.Extensions = sct.Extensions + entry := ct.LogEntry{Leaf: *leaf} + return c.Verifier.VerifySCTSignature(sct, entry) +} + +// GetSTHConsistency retrieves the consistency proof between two snapshots. +func (c *LogClient) GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error) { + base10 := 10 + params := map[string]string{ + "first": strconv.FormatUint(first, base10), + "second": strconv.FormatUint(second, base10), + } + var resp ct.GetSTHConsistencyResponse + if _, _, err := c.GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp); err != nil { + return nil, err + } + return resp.Consistency, nil +} + +// GetProofByHash returns an audit path for the hash of an SCT. +func (c *LogClient) GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error) { + b64Hash := base64.StdEncoding.EncodeToString(hash) + base10 := 10 + params := map[string]string{ + "tree_size": strconv.FormatUint(treeSize, base10), + "hash": b64Hash, + } + var resp ct.GetProofByHashResponse + if _, _, err := c.GetAndParse(ctx, ct.GetProofByHashPath, params, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +// GetAcceptedRoots retrieves the set of acceptable root certificates for a log. 
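+// +// For illustration, assuming lc is a *LogClient created with New (error handling elided): +// +//	roots, err := lc.GetAcceptedRoots(ctx) +//	if err != nil { /* handle */ } +//	fmt.Printf("log accepts %d root certificates\n", len(roots))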
+func (c *LogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) { + var resp ct.GetRootsResponse + httpRsp, body, err := c.GetAndParse(ctx, ct.GetRootsPath, nil, &resp) + if err != nil { + return nil, err + } + var roots []ct.ASN1Cert + for _, cert64 := range resp.Certificates { + cert, err := base64.StdEncoding.DecodeString(cert64) + if err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + roots = append(roots, ct.ASN1Cert{Data: cert}) + } + return roots, nil +} + +// GetEntryAndProof returns a log entry and audit path for the index of a leaf. +func (c *LogClient) GetEntryAndProof(ctx context.Context, index, treeSize uint64) (*ct.GetEntryAndProofResponse, error) { + base10 := 10 + params := map[string]string{ + "leaf_index": strconv.FormatUint(index, base10), + "tree_size": strconv.FormatUint(treeSize, base10), + } + var resp ct.GetEntryAndProofResponse + if _, _, err := c.GetAndParse(ctx, ct.GetEntryAndProofPath, params, &resp); err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/multilog.go b/vendor/github.com/google/certificate-transparency-go/client/multilog.go new file mode 100644 index 00000000000..afd75a6db4a --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/multilog.go @@ -0,0 +1,223 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "crypto/sha256" + "errors" + "fmt" + "net/http" + "os" + "time" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/client/configpb" + "github.com/google/certificate-transparency-go/jsonclient" + "github.com/google/certificate-transparency-go/x509" + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/proto" +) + +type interval struct { + lower *time.Time // nil => no lower bound + upper *time.Time // nil => no upper bound +} + +// TemporalLogConfigFromFile creates a TemporalLogConfig object from the given +// filename, which should contain text-protobuf encoded configuration data. +func TemporalLogConfigFromFile(filename string) (*configpb.TemporalLogConfig, error) { + if len(filename) == 0 { + return nil, errors.New("log config filename empty") + } + + cfgBytes, err := os.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("failed to read log config: %v", err) + } + + var cfg configpb.TemporalLogConfig + if txtErr := prototext.Unmarshal(cfgBytes, &cfg); txtErr != nil { + if binErr := proto.Unmarshal(cfgBytes, &cfg); binErr != nil { + return nil, fmt.Errorf("failed to parse TemporalLogConfig from %q as text protobuf (%v) or binary protobuf (%v)", filename, txtErr, binErr) + } + } + + if len(cfg.Shard) == 0 { + return nil, errors.New("empty log config found") + } + return &cfg, nil +} + +// AddLogClient is an interface that allows adding certificates and pre-certificates to a log. 
+// Both LogClient and TemporalLogClient implement this interface, which allows users to +// commonize code for adding certs to normal/temporal logs. +type AddLogClient interface { + AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) + AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) + GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) +} + +// TemporalLogClient allows [pre-]certificates to be uploaded to a temporal log. +type TemporalLogClient struct { + Clients []*LogClient + intervals []interval +} + +// NewTemporalLogClient builds a new client for interacting with a temporal log. +// The provided config should be contiguous and chronological. +func NewTemporalLogClient(cfg *configpb.TemporalLogConfig, hc *http.Client) (*TemporalLogClient, error) { + if len(cfg.GetShard()) == 0 { + return nil, errors.New("empty config") + } + + overall, err := shardInterval(cfg.Shard[0]) + if err != nil { + return nil, fmt.Errorf("cfg.Shard[0] invalid: %v", err) + } + intervals := make([]interval, 0, len(cfg.Shard)) + intervals = append(intervals, overall) + for i := 1; i < len(cfg.Shard); i++ { + interval, err := shardInterval(cfg.Shard[i]) + if err != nil { + return nil, fmt.Errorf("cfg.Shard[%d] invalid: %v", i, err) + } + if overall.upper == nil { + return nil, fmt.Errorf("cfg.Shard[%d] extends an interval with no upper bound", i) + } + if interval.lower == nil { + return nil, fmt.Errorf("cfg.Shard[%d] has no lower bound but extends an interval", i) + } + if !interval.lower.Equal(*overall.upper) { + return nil, fmt.Errorf("cfg.Shard[%d] starts at %v but previous interval ended at %v", i, interval.lower, overall.upper) + } + overall.upper = interval.upper + intervals = append(intervals, interval) + } + clients := make([]*LogClient, 0, len(cfg.Shard)) + for i, shard := range cfg.Shard { + opts := jsonclient.Options{UserAgent: "ct-go-multilog/1.0"} + opts.PublicKeyDER = shard.GetPublicKeyDer() + c, err := New(shard.Uri, hc, opts) + if err != nil { + return nil, fmt.Errorf("failed to create client for cfg.Shard[%d]: %v", i, err) + } + clients = append(clients, c) + } + tlc := TemporalLogClient{ + Clients: clients, + intervals: intervals, + } + return &tlc, nil +} + +// GetAcceptedRoots retrieves the set of acceptable root certificates for all +// of the shards of a temporal log (i.e. the union). +func (tlc *TemporalLogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) { + type result struct { + roots []ct.ASN1Cert + err error + } + results := make(chan result, len(tlc.Clients)) + for _, c := range tlc.Clients { + go func(c *LogClient) { + var r result + r.roots, r.err = c.GetAcceptedRoots(ctx) + results <- r + }(c) + } + + var allRoots []ct.ASN1Cert + seen := make(map[[sha256.Size]byte]bool) + for range tlc.Clients { + r := <-results + if r.err != nil { + return nil, r.err + } + for _, root := range r.roots { + h := sha256.Sum256(root.Data) + if seen[h] { + continue + } + seen[h] = true + allRoots = append(allRoots, root) + } + } + return allRoots, nil +} + +// AddChain adds the (DER represented) X509 chain to the appropriate log. +func (tlc *TemporalLogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return tlc.addChain(ctx, ct.X509LogEntryType, ct.AddChainPath, chain) +} + +// AddPreChain adds the (DER represented) Precertificate chain to the appropriate log. 
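+// +// A hedged end-to-end sketch ("multilog.cfg" is a hypothetical text-protobuf config path; the shard is selected from the leaf's NotAfter, as in addChain below; error handling elided): +// +//	cfg, _ := client.TemporalLogConfigFromFile("multilog.cfg") // hypothetical path +//	tlc, _ := client.NewTemporalLogClient(cfg, &http.Client{}) +//	sct, err := tlc.AddPreChain(ctx, chain)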
+func (tlc *TemporalLogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return tlc.addChain(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain) +} + +func (tlc *TemporalLogClient) addChain(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + // Parse the first entry in the chain + if len(chain) == 0 { + return nil, errors.New("missing chain") + } + cert, err := x509.ParseCertificate(chain[0].Data) + if err != nil { + return nil, fmt.Errorf("failed to parse initial chain entry: %v", err) + } + cidx, err := tlc.IndexByDate(cert.NotAfter) + if err != nil { + return nil, fmt.Errorf("failed to find log to process cert: %v", err) + } + return tlc.Clients[cidx].addChainWithRetry(ctx, ctype, path, chain) +} + +// IndexByDate returns the index of the Clients entry that is appropriate for the given +// date. +func (tlc *TemporalLogClient) IndexByDate(when time.Time) (int, error) { + for i, interval := range tlc.intervals { + if (interval.lower != nil) && when.Before(*interval.lower) { + continue + } + if (interval.upper != nil) && !when.Before(*interval.upper) { + continue + } + return i, nil + } + return -1, fmt.Errorf("no log found encompassing date %v", when) +} + +func shardInterval(cfg *configpb.LogShardConfig) (interval, error) { + var interval interval + if cfg.NotAfterStart != nil { + if err := cfg.NotAfterStart.CheckValid(); err != nil { + return interval, fmt.Errorf("failed to parse NotAfterStart: %v", err) + } + t := cfg.NotAfterStart.AsTime() + interval.lower = &t + } + if cfg.NotAfterLimit != nil { + if err := cfg.NotAfterLimit.CheckValid(); err != nil { + return interval, fmt.Errorf("failed to parse NotAfterLimit: %v", err) + } + t := cfg.NotAfterLimit.AsTime() + interval.upper = &t + } + + if interval.lower != nil && interval.upper != nil && !(*interval.lower).Before(*interval.upper) { + return interval, errors.New("inverted interval") + } + return interval, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml new file mode 100644 index 00000000000..50ac5ef7b13 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml @@ -0,0 +1,214 @@ +############################################################################# +## The top section of this file is identical in the 3 cloudbuild.*yaml files. +## Make sure any edits you make here are copied over to the other files too +## if appropriate. +## +## TODO(al): consider if it's possible to merge these 3 files and control via +## substitutions. 
+############################################################################# + +timeout: 1200s +options: + machineType: N1_HIGHCPU_32 + volumes: + - name: go-modules + path: /go + env: + - GOPROXY=https://proxy.golang.org + - PROJECT_ROOT=github.com/google/certificate-transparency-go + - GOPATH=/go + +substitutions: + _CLUSTER_NAME: trillian-opensource-ci + _MASTER_ZONE: us-central1-a + +# Cloud Build logs sent to GCS bucket +logsBucket: 'gs://trillian-cloudbuild-logs' + +steps: +# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps: +- name: 'gcr.io/cloud-builders/docker' + entrypoint: 'bash' + args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0'] +- name: 'gcr.io/cloud-builders/docker' + args: [ + 'build', + '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '-f', './integration/Dockerfile', + '.' + ] + +# prepare spins up an ephemeral trillian instance for testing use. +- name: gcr.io/$PROJECT_ID/ct_testbase + entrypoint: 'bash' + id: 'prepare' + args: + - '-exc' + - | + # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders. + docker pull gcr.io/$PROJECT_ID/log_server:latest + docker tag gcr.io/$PROJECT_ID/log_server:latest deployment_trillian-log-server + docker pull gcr.io/$PROJECT_ID/log_signer:latest + docker tag gcr.io/$PROJECT_ID/log_signer:latest deployment_trillian-log-signer + + # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo: + export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)" + + # We need to fix up Trillian's docker-compose to connect to the CloudBuild network so that tests can use it: + echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml + + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml pull mysql trillian-log-server trillian-log-signer + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml up -d mysql trillian-log-server trillian-log-signer + +# Install proto-related bits and block on Trillian being ready +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci-ready' + entrypoint: 'bash' + args: + - '-ec' + - | + go install \ + github.com/golang/protobuf/proto \ + github.com/golang/protobuf/protoc-gen-go \ + github.com/golang/mock/mockgen \ + go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \ + github.com/fullstorydev/grpcurl/cmd/grpcurl + + # Generate all protoc and mockgen files + go generate -run="protoc" ./... + go generate -run="mockgen" ./... + + # Cache all the modules we'll need too + go mod download + go test ./...
+ + # Wait for trillian logserver to be up + until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done + + # Reset the CT test database + export CT_GO_PATH="$$(go list -f '{{.Dir}}' github.com/google/certificate-transparency-go)" + export MYSQL_HOST="mysql" + export MYSQL_ROOT_PASSWORD="zaphod" + export MYSQL_USER_HOST="%" + yes | bash "$${CT_GO_PATH}/scripts/resetctdb.sh" --verbose + waitFor: ['prepare'] + +# Run the presubmit tests +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'default_test' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'race_detection' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_coverage' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate --coverage' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_race' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'with_pkcs11_and_race' + env: + - 'GOFLAGS=-race --tags=pkcs11' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'WITH_PKCS11=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +# Collect and submit codecoverage reports +- name: 'gcr.io/cloud-builders/curl' + id: 'codecov.io' + entrypoint: bash + args: ['-c', 'bash <(curl -s https://codecov.io/bash)'] + env: + - 'VCS_COMMIT_ID=$COMMIT_SHA' + - 'VCS_BRANCH_NAME=$BRANCH_NAME' + - 'VCS_PULL_REQUEST=$_PR_NUMBER' + - 'CI_BUILD_ID=$BUILD_ID' + - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger + waitFor: ['etcd_with_coverage'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci_complete' + entrypoint: /bin/true + waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race'] + +############################################################################ +## End of replicated section. +## Below are deployment specific steps for the CD env. +############################################################################ + +- id: build_ctfe + name: gcr.io/cloud-builders/docker + args: + - build + - --file=trillian/examples/deployment/docker/ctfe/Dockerfile + - --tag=gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} + - --cache-from=gcr.io/${PROJECT_ID}/ctfe + - . 
+ waitFor: [-] +- id: build_envsubst + name: gcr.io/cloud-builders/docker + args: + - build + - trillian/examples/deployment/docker/envsubst + - -t + - envsubst + waitFor: ['ci_complete'] +- id: envsubst_kubernetes_configs + name: envsubst + args: + - trillian/examples/deployment/kubernetes/ctfe-deployment.yaml + - trillian/examples/deployment/kubernetes/ctfe-service.yaml + - trillian/examples/deployment/kubernetes/ctfe-ingress.yaml + env: + - PROJECT_ID=${PROJECT_ID} + - IMAGE_TAG=${COMMIT_SHA} + waitFor: + - build_envsubst +- id: update_kubernetes_configs_dryrun + name: gcr.io/cloud-builders/kubectl + args: + - apply + - --dry-run=server + - -f=trillian/examples/deployment/kubernetes/ctfe-deployment.yaml + - -f=trillian/examples/deployment/kubernetes/ctfe-service.yaml + - -f=trillian/examples/deployment/kubernetes/ctfe-ingress.yaml + env: + - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE} + - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME} + waitFor: + - envsubst_kubernetes_configs + - build_ctfe + +images: +- gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} +- gcr.io/${PROJECT_ID}/ct_testbase:latest diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml new file mode 100644 index 00000000000..566edfe0f97 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml @@ -0,0 +1,230 @@ +############################################################################# +## The top section of this file is identical in the 3 cloudbuild.*yaml files. +## Make sure any edits you make here are copied over to the other files too +## if appropriate. +## +## TODO(al): consider if it's possible to merge these 3 files and control via +## substitutions. +############################################################################# + +timeout: 1200s +options: + machineType: N1_HIGHCPU_32 + volumes: + - name: go-modules + path: /go + env: + - GOPROXY=https://proxy.golang.org + - PROJECT_ROOT=github.com/google/certificate-transparency-go + - GOPATH=/go + +substitutions: + _CLUSTER_NAME: trillian-opensource-ci + _MASTER_ZONE: us-central1-a + +# Cloud Build logs sent to GCS bucket +logsBucket: 'gs://trillian-cloudbuild-logs' + +steps: +# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps: +- name: 'gcr.io/cloud-builders/docker' + entrypoint: 'bash' + args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0'] +- name: 'gcr.io/cloud-builders/docker' + args: [ + 'build', + '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '-f', './integration/Dockerfile', + '.' + ] + +# prepare spins up an ephemeral trillian instance for testing use. +- name: gcr.io/$PROJECT_ID/ct_testbase + entrypoint: 'bash' + id: 'prepare' + args: + - '-exc' + - | + # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders. 
+ docker pull gcr.io/$PROJECT_ID/log_server:latest + docker tag gcr.io/$PROJECT_ID/log_server:latest deployment_trillian-log-server + docker pull gcr.io/$PROJECT_ID/log_signer:latest + docker tag gcr.io/$PROJECT_ID/log_signer:latest deployment_trillian-log-signer + + # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo: + export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)" + + # We need to fix up Trillian's docker-compose to connect to the CloudBuild network so that tests can use it: + echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml + + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml pull mysql trillian-log-server trillian-log-signer + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml up -d mysql trillian-log-server trillian-log-signer + +# Install proto-related bits and block on Trillian being ready +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci-ready' + entrypoint: 'bash' + args: + - '-ec' + - | + go install \ + github.com/golang/protobuf/proto \ + github.com/golang/protobuf/protoc-gen-go \ + github.com/golang/mock/mockgen \ + go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \ + github.com/fullstorydev/grpcurl/cmd/grpcurl + + # Generate all protoc and mockgen files + go generate -run="protoc" ./... + go generate -run="mockgen" ./... + + # Cache all the modules we'll need too + go mod download + go test ./... + + # Wait for trillian logserver to be up + until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done + + # Reset the CT test database + export CT_GO_PATH="$$(go list -f '{{.Dir}}' github.com/google/certificate-transparency-go)" + export MYSQL_HOST="mysql" + export MYSQL_ROOT_PASSWORD="zaphod" + export MYSQL_USER_HOST="%" + yes | bash "$${CT_GO_PATH}/scripts/resetctdb.sh" --verbose + waitFor: ['prepare'] + +# Run the presubmit tests +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'default_test' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'race_detection' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_coverage' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate --coverage' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_race' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'with_pkcs11_and_race' + env: + - 'GOFLAGS=-race --tags=pkcs11' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'WITH_PKCS11=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor:
['ci-ready'] + +# Collect and submit codecoverage reports +- name: 'gcr.io/cloud-builders/curl' + id: 'codecov.io' + entrypoint: bash + args: ['-c', 'bash <(curl -s https://codecov.io/bash)'] + env: + - 'VCS_COMMIT_ID=$COMMIT_SHA' + - 'VCS_BRANCH_NAME=$BRANCH_NAME' + - 'VCS_PULL_REQUEST=$_PR_NUMBER' + - 'CI_BUILD_ID=$BUILD_ID' + - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger + waitFor: ['etcd_with_coverage'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci_complete' + entrypoint: /bin/true + waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race'] + +############################################################################ +## End of replicated section. +## Below are deployment specific steps for the CD env. +############################################################################ + +- id: build_ctfe + name: gcr.io/cloud-builders/docker + args: + - build + - --file=trillian/examples/deployment/docker/ctfe/Dockerfile + - --tag=gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} + - --cache-from=gcr.io/${PROJECT_ID}/ctfe + - . + waitFor: ["-"] +- id: push_ctfe + name: gcr.io/cloud-builders/docker + args: + - push + - gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} + waitFor: + - build_ctfe +- id: tag_latest_ctfe + name: gcr.io/cloud-builders/gcloud + args: + - container + - images + - add-tag + - gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} + - gcr.io/${PROJECT_ID}/ctfe:latest + waitFor: + - push_ctfe +- id: build_envsubst + name: gcr.io/cloud-builders/docker + args: + - build + - trillian/examples/deployment/docker/envsubst + - -t + - envsubst + waitFor: ["-"] +- id: envsubst_kubernetes_configs + name: envsubst + args: + - trillian/examples/deployment/kubernetes/ctfe-deployment.yaml + - trillian/examples/deployment/kubernetes/ctfe-service.yaml + - trillian/examples/deployment/kubernetes/ctfe-ingress.yaml + env: + - PROJECT_ID=${PROJECT_ID} + - IMAGE_TAG=${COMMIT_SHA} + waitFor: + - build_envsubst +- id: update_kubernetes_configs + name: gcr.io/cloud-builders/kubectl + args: + - apply + - -f=trillian/examples/deployment/kubernetes/ctfe-deployment.yaml + - -f=trillian/examples/deployment/kubernetes/ctfe-service.yaml + - -f=trillian/examples/deployment/kubernetes/ctfe-ingress.yaml + env: + - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE} + - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME} + waitFor: + - envsubst_kubernetes_configs + - push_ctfe + +images: +- gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} +- gcr.io/${PROJECT_ID}/ct_testbase:latest diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_postgresql.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_postgresql.yaml new file mode 100644 index 00000000000..37faca72ac0 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_postgresql.yaml @@ -0,0 +1,161 @@ +############################################################################# +## This file is based on cloudbuild.yaml, but targets PostgreSQL instead of +## MySQL. 
+############################################################################# + +timeout: 1200s +options: + machineType: N1_HIGHCPU_32 + volumes: + - name: go-modules + path: /go + env: + - GOPROXY=https://proxy.golang.org + - PROJECT_ROOT=github.com/google/certificate-transparency-go + - GOPATH=/go + +substitutions: + _CLUSTER_NAME: trillian-opensource-ci + _MASTER_ZONE: us-central1-a + +# Cloud Build logs sent to GCS bucket +logsBucket: 'gs://trillian-cloudbuild-logs' + +steps: +# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps: +- name: 'gcr.io/cloud-builders/docker' + entrypoint: 'bash' + args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0'] +- name: 'gcr.io/cloud-builders/docker' + args: [ + 'build', + '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '-f', './integration/Dockerfile', + '.' + ] + +# prepare spins up an ephemeral trillian instance for testing use. +- name: gcr.io/$PROJECT_ID/ct_testbase + entrypoint: 'bash' + id: 'prepare' + args: + - '-exc' + - | + # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders. + docker pull gcr.io/$PROJECT_ID/log_server:latest + docker tag gcr.io/$PROJECT_ID/log_server:latest postgresql_trillian-log-server + docker pull gcr.io/$PROJECT_ID/log_signer:latest + docker tag gcr.io/$PROJECT_ID/log_signer:latest postgresql_trillian-log-signer + + # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo: + export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)" + + # We need to fix up Trillian's docker-compose to connect to the CloudBuild network so that tests can use it: + echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/postgresql/docker-compose.yml + + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/postgresql/docker-compose.yml pull postgresql trillian-log-server trillian-log-signer + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/postgresql/docker-compose.yml up -d postgresql trillian-log-server trillian-log-signer + +# Install proto-related bits and block on Trillian being ready +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci-ready' + entrypoint: 'bash' + args: + - '-ec' + - | + go install \ + github.com/golang/protobuf/proto \ + github.com/golang/protobuf/protoc-gen-go \ + github.com/golang/mock/mockgen \ + go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \ + github.com/fullstorydev/grpcurl/cmd/grpcurl + + # Generate all protoc and mockgen files + go generate -run="protoc" ./... + go generate -run="mockgen" ./... + + # Cache all the modules we'll need too + go mod download + go test ./...
+ + # Wait for trillian logserver to be up + until nc -z postgresql_trillian-log-server_1 8090; do echo .; sleep 5; done + + # Reset the CT test database + export CT_GO_PATH="$$(go list -f '{{.Dir}}' github.com/google/certificate-transparency-go)" + export POSTGRESQL_HOST="postgresql" + yes | bash "$${CT_GO_PATH}/scripts/resetpgctdb.sh" --verbose + waitFor: ['prepare'] + +# Run the presubmit tests +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'default_test' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'race_detection' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_coverage' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate --coverage' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_race' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'with_pkcs11_and_race' + env: + - 'GOFLAGS=-race --tags=pkcs11' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'WITH_PKCS11=true' + - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090' + - 'CONFIG_SUBDIR=/postgresql' + waitFor: ['ci-ready'] + +# Collect and submit codecoverage reports +- name: 'gcr.io/cloud-builders/curl' + id: 'codecov.io' + entrypoint: bash + args: ['-c', 'bash <(curl -s https://codecov.io/bash)'] + env: + - 'VCS_COMMIT_ID=$COMMIT_SHA' + - 'VCS_BRANCH_NAME=$BRANCH_NAME' + - 'VCS_PULL_REQUEST=$_PR_NUMBER' + - 'CI_BUILD_ID=$BUILD_ID' + - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger + waitFor: ['etcd_with_coverage'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci_complete' + entrypoint: /bin/true + waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race'] diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml new file mode 100644 index 00000000000..ce9efc2b2cb --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml @@ -0,0 +1,173 @@ +############################################################################# +## The top section of this file is identical in the 3 cloudbuild.*yaml files. +## Make sure any edits you make here are copied over to the other files too +## if appropriate. +## +## TODO(al): consider if it's possible to merge these 3 files and control via +## substitutions. 
+############################################################################# + +timeout: 1200s +options: + machineType: N1_HIGHCPU_32 + volumes: + - name: go-modules + path: /go + env: + - GOPROXY=https://proxy.golang.org + - PROJECT_ROOT=github.com/google/certificate-transparency-go + - GOPATH=/go + +substitutions: + _CLUSTER_NAME: trillian-opensource-ci + _MASTER_ZONE: us-central1-a + +# Cloud Build logs sent to GCS bucket +logsBucket: 'gs://trillian-cloudbuild-logs' + +steps: +# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps: +- name: 'gcr.io/cloud-builders/docker' + entrypoint: 'bash' + args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0'] +- name: 'gcr.io/cloud-builders/docker' + args: [ + 'build', + '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest', + '-f', './integration/Dockerfile', + '.' + ] + +# prepare spins up an ephemeral trillian instance for testing use. +- name: gcr.io/$PROJECT_ID/ct_testbase + entrypoint: 'bash' + id: 'prepare' + args: + - '-exc' + - | + # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders. + docker pull gcr.io/$PROJECT_ID/log_server:latest + docker tag gcr.io/$PROJECT_ID/log_server:latest deployment_trillian-log-server + docker pull gcr.io/$PROJECT_ID/log_signer:latest + docker tag gcr.io/$PROJECT_ID/log_signer:latest deployment_trillian-log-signer + + # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo: + export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)" + + # We need to fix up Trillian's docker-compose to connect to the CloudBuild network so that tests can use it: + echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml + + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml pull mysql trillian-log-server trillian-log-signer + docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml up -d mysql trillian-log-server trillian-log-signer + +# Install proto-related bits and block on Trillian being ready +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci-ready' + entrypoint: 'bash' + args: + - '-ec' + - | + go install \ + github.com/golang/protobuf/proto \ + github.com/golang/protobuf/protoc-gen-go \ + github.com/golang/mock/mockgen \ + go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \ + github.com/fullstorydev/grpcurl/cmd/grpcurl + + # Generate all protoc and mockgen files + go generate -run="protoc" ./... + go generate -run="mockgen" ./... + + # Cache all the modules we'll need too + go mod download + go test ./...
+ + # Wait for trillian logserver to be up + until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done + waitFor: ['prepare'] + +# Run the presubmit tests +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'default_test' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --no-generate' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'race_detection' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_coverage' + env: + - 'GOFLAGS=' + - 'PRESUBMIT_OPTS=--no-linters --coverage' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'etcd_with_race' + env: + - 'GOFLAGS=-race' + - 'PRESUBMIT_OPTS=--no-linters' + - 'WITH_ETCD=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'with_pkcs11_and_race' + env: + - 'GOFLAGS=-race --tags=pkcs11' + - 'PRESUBMIT_OPTS=--no-linters' + - 'WITH_PKCS11=true' + - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090' + - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090' + waitFor: ['ci-ready'] + +# Collect and submit codecoverage reports +- name: 'gcr.io/cloud-builders/curl' + id: 'codecov.io' + entrypoint: bash + args: ['-c', 'bash <(curl -s https://codecov.io/bash)'] + env: + - 'VCS_COMMIT_ID=$COMMIT_SHA' + - 'VCS_BRANCH_NAME=$BRANCH_NAME' + - 'VCS_PULL_REQUEST=$_PR_NUMBER' + - 'CI_BUILD_ID=$BUILD_ID' + - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger + waitFor: ['etcd_with_coverage'] + +- name: gcr.io/$PROJECT_ID/ct_testbase + id: 'ci_complete' + entrypoint: /bin/true + waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race'] + +############################################################################ +## End of replicated section. +## Below are deployment specific steps for the CD env. +############################################################################ + +- id: build_ctfe + name: gcr.io/cloud-builders/docker + args: + - build + - --file=trillian/examples/deployment/docker/ctfe/Dockerfile + - --tag=gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME} + - --cache-from=gcr.io/${PROJECT_ID}/ctfe + - . + +images: +- gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME} +- gcr.io/${PROJECT_ID}/ct_testbase:latest diff --git a/vendor/github.com/google/certificate-transparency-go/codecov.yml b/vendor/github.com/google/certificate-transparency-go/codecov.yml new file mode 100644 index 00000000000..7269ff27150 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/codecov.yml @@ -0,0 +1,19 @@ +# Customizations to codecov for c-t-go repo. This will be merged into +# the team / default codecov yaml file. +# +# Validate changes with: +# curl --data-binary @codecov.yml https://codecov.io/validate + +# Exclude code that's for testing, demos or utilities that aren't really +# part of production releases. 
+ignore: + - "**/mock_*.go" + - "**/testonly" + - "trillian/integration" + +coverage: + status: + project: + default: + # Allow 1% coverage drop without complaining, to avoid being too noisy. + threshold: 1% diff --git a/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go b/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go new file mode 100644 index 00000000000..640fcec9c1f --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go @@ -0,0 +1,211 @@ +// Copyright 2018 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ctutil contains utilities for Certificate Transparency. +package ctutil + +import ( + "bytes" + "crypto" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/x509" +) + +var emptyHash = [sha256.Size]byte{} + +// LeafHashB64 does as LeafHash does, but returns the leaf hash base64-encoded. +// The base64-encoded leaf hash returned by LeafHashB64 can be used with the +// get-proof-by-hash API endpoint of Certificate Transparency Logs. +func LeafHashB64(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) (string, error) { + hash, err := LeafHash(chain, sct, embedded) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(hash[:]), nil +} + +// LeafHash calculates the leaf hash of the certificate or precertificate at +// chain[0] that sct was issued for. +// +// sct is required because the SCT timestamp is used to calculate the leaf hash. +// Leaf hashes are unique to (pre)certificate-SCT pairs. +// +// This function can be used with three different types of leaf certificate: +// - X.509 Certificate: +// If using this function to calculate the leaf hash for a normal X.509 +// certificate then it is enough to just provide the end entity +// certificate in chain. This case assumes that the SCT being provided is +// not embedded within the leaf certificate provided, i.e. the certificate +// is what was submitted to the Certificate Transparency Log in order to +// obtain the SCT. For this case, set embedded to false. +// - Precertificate: +// If using this function to calculate the leaf hash for a precertificate +// then the issuing certificate must also be provided in chain. The +// precertificate should be at chain[0], and its issuer at chain[1]. For +// this case, set embedded to false. +// - X.509 Certificate containing the SCT embedded within it: +// If using this function to calculate the leaf hash for a certificate +// where the SCT provided is embedded within the certificate you +// are providing at chain[0], set embedded to true. LeafHash will +// calculate the leaf hash by building the corresponding precertificate. +// LeafHash will return an error if the provided SCT cannot be found +// embedded within chain[0].
As with the precertificate case, the issuing +// certificate must also be provided in chain. The certificate containing +// the embedded SCT should be at chain[0], and its issuer at chain[1]. +// +// Note: LeafHash doesn't check that the provided SCT verifies for the given +// chain. It simply calculates what the leaf hash would be for the given +// (pre)certificate-SCT pair. +func LeafHash(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) ([sha256.Size]byte, error) { + leaf, err := createLeaf(chain, sct, embedded) + if err != nil { + return emptyHash, err + } + return ct.LeafHashForLeaf(leaf) +} + +// VerifySCT takes the public key of a Certificate Transparency Log, a +// certificate chain, and an SCT and verifies whether the SCT is a valid SCT for +// the certificate at chain[0], signed by the Log that the public key belongs +// to. If the SCT does not verify, an error will be returned. +// +// This function can be used with three different types of leaf certificate: +// - X.509 Certificate: +// If using this function to verify an SCT for a normal X.509 certificate +// then it is enough to just provide the end entity certificate in chain. +// This case assumes that the SCT being provided is not embedded within +// the leaf certificate provided, i.e. the certificate is what was +// submitted to the Certificate Transparency Log in order to obtain the +// SCT. For this case, set embedded to false. +// - Precertificate: +// If using this function to verify an SCT for a precertificate then the +// issuing certificate must also be provided in chain. The precertificate +// should be at chain[0], and its issuer at chain[1]. For this case, set +// embedded to false. +// - X.509 Certificate containing the SCT embedded within it: +// If the SCT you wish to verify is embedded within the certificate you +// are providing at chain[0], set embedded to true. VerifySCT will +// verify the provided SCT by building the corresponding precertificate. +// VerifySCT will return an error if the provided SCT cannot be found +// embedded within chain[0]. As with the precertificate case, the issuing +// certificate must also be provided in chain. The certificate containing +// the embedded SCT should be at chain[0], and its issuer at chain[1]. +func VerifySCT(pubKey crypto.PublicKey, chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) error { + s, err := ct.NewSignatureVerifier(pubKey) + if err != nil { + return fmt.Errorf("error creating signature verifier: %s", err) + } + + return VerifySCTWithVerifier(s, chain, sct, embedded) +} + +// VerifySCTWithVerifier takes a ct.SignatureVerifier, a certificate chain, and +// an SCT and verifies whether the SCT is a valid SCT for the certificate at +// chain[0], signed by the Log whose public key was used to set up the +// ct.SignatureVerifier. If the SCT does not verify, an error will be returned. +// +// This function can be used with three different types of leaf certificate: +// - X.509 Certificate: +// If using this function to verify an SCT for a normal X.509 certificate +// then it is enough to just provide the end entity certificate in chain. +// This case assumes that the SCT being provided is not embedded within +// the leaf certificate provided, i.e. the certificate is what was +// submitted to the Certificate Transparency Log in order to obtain the +// SCT. For this case, set embedded to false. 
+// - Precertificate: +// If using this function to verify an SCT for a precertificate then the +// issuing certificate must also be provided in chain. The precertificate +// should be at chain[0], and its issuer at chain[1]. For this case, set +// embedded to false. +// - X.509 Certificate containing the SCT embedded within it: +// If the SCT you wish to verify is embedded within the certificate you +// are providing at chain[0], set embedded to true. VerifySCT will +// verify the provided SCT by building the corresponding precertificate. +// VerifySCT will return an error if the provided SCT cannot be found +// embedded within chain[0]. As with the precertificate case, the issuing +// certificate must also be provided in chain. The certificate containing +// the embedded SCT should be at chain[0], and its issuer at chain[1]. +func VerifySCTWithVerifier(sv *ct.SignatureVerifier, chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) error { + if sv == nil { + return errors.New("ct.SignatureVerifier is nil") + } + + leaf, err := createLeaf(chain, sct, embedded) + if err != nil { + return err + } + + return sv.VerifySCTSignature(*sct, ct.LogEntry{Leaf: *leaf}) +} + +func createLeaf(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) (*ct.MerkleTreeLeaf, error) { + if len(chain) == 0 { + return nil, errors.New("chain is empty") + } + if sct == nil { + return nil, errors.New("sct is nil") + } + + if embedded { + sctPresent, err := ContainsSCT(chain[0], sct) + if err != nil { + return nil, fmt.Errorf("error checking for SCT in leaf certificate: %s", err) + } + if !sctPresent { + return nil, errors.New("SCT provided is not embedded within leaf certificate") + } + } + + certType := ct.X509LogEntryType + if chain[0].IsPrecertificate() || embedded { + certType = ct.PrecertLogEntryType + } + + var leaf *ct.MerkleTreeLeaf + var err error + if embedded { + leaf, err = ct.MerkleTreeLeafForEmbeddedSCT(chain, sct.Timestamp) + } else { + leaf, err = ct.MerkleTreeLeafFromChain(chain, certType, sct.Timestamp) + } + if err != nil { + return nil, fmt.Errorf("error creating MerkleTreeLeaf: %s", err) + } + return leaf, nil +} + +// ContainsSCT checks to see whether the given SCT is embedded within the given +// certificate. +func ContainsSCT(cert *x509.Certificate, sct *ct.SignedCertificateTimestamp) (bool, error) { + if cert == nil || sct == nil { + return false, nil + } + + sctBytes, err := tls.Marshal(*sct) + if err != nil { + return false, fmt.Errorf("error tls.Marshalling SCT: %s", err) + } + for _, s := range cert.SCTList.SCTList { + if bytes.Equal(sctBytes, s.Val) { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go b/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go new file mode 100644 index 00000000000..83d9739c7e5 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go @@ -0,0 +1,174 @@ +// Copyright 2018 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
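// NOTE (editor's sketch, not part of the vendored file): a minimal example of
// how the ctutil helpers above fit together for a certificate carrying an
// embedded SCT. `chain` (leaf at chain[0], issuer at chain[1]), `sct` and
// `logPubKey` are assumed to have been obtained elsewhere.
//
//	// Base64 leaf hash, usable with the get-proof-by-hash endpoint:
//	b64, err := ctutil.LeafHashB64(chain, sct, true /* embedded */)
//	if err != nil {
//		log.Fatalf("leaf hash: %v", err)
//	}
//	// Check the SCT signature against the log's public key:
//	if err := ctutil.VerifySCT(logPubKey, chain, sct, true /* embedded */); err != nil {
//		log.Fatalf("SCT does not verify: %v", err)
//	}
//	fmt.Println("leaf hash:", b64)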
diff --git a/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go b/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go
new file mode 100644
index 00000000000..83d9739c7e5
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go
@@ -0,0 +1,174 @@
+// Copyright 2018 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ctutil
+
+import (
+	"context"
+	"crypto/sha256"
+	"fmt"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	ct "github.com/google/certificate-transparency-go"
+	"github.com/google/certificate-transparency-go/client"
+	"github.com/google/certificate-transparency-go/jsonclient"
+	"github.com/google/certificate-transparency-go/loglist3"
+	"github.com/google/certificate-transparency-go/x509"
+	"github.com/transparency-dev/merkle/proof"
+	"github.com/transparency-dev/merkle/rfc6962"
+)
+
+// LogInfo holds the objects needed to perform per-log verification and
+// validation of SCTs.
+type LogInfo struct {
+	Description string
+	Client      client.CheckLogClient
+	MMD         time.Duration
+	Verifier    *ct.SignatureVerifier
+	PublicKey   []byte
+
+	mu      sync.RWMutex
+	lastSTH *ct.SignedTreeHead
+}
+
+// NewLogInfo builds a LogInfo object based on a log list entry.
+func NewLogInfo(log *loglist3.Log, hc *http.Client) (*LogInfo, error) {
+	url := log.URL
+	if !strings.HasPrefix(url, "https://") {
+		url = "https://" + url
+	}
+	lc, err := client.New(url, hc, jsonclient.Options{PublicKeyDER: log.Key, UserAgent: "ct-go-logclient"})
+	if err != nil {
+		return nil, fmt.Errorf("failed to create client for log %q: %v", log.Description, err)
+	}
+	return newLogInfo(log, lc)
+}
+
+func newLogInfo(log *loglist3.Log, lc client.CheckLogClient) (*LogInfo, error) {
+	logKey, err := x509.ParsePKIXPublicKey(log.Key)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse public key data for log %q: %v", log.Description, err)
+	}
+	verifier, err := ct.NewSignatureVerifier(logKey)
+	if err != nil {
+		return nil, fmt.Errorf("failed to build verifier for log %q: %v", log.Description, err)
+	}
+	mmd := time.Duration(log.MMD) * time.Second
+	return &LogInfo{
+		Description: log.Description,
+		Client:      lc,
+		MMD:         mmd,
+		Verifier:    verifier,
+		PublicKey:   log.Key,
+	}, nil
+}
+
+// LogInfoByHash holds LogInfo objects indexed by the SHA-256 hash of the
+// log's public key.
+type LogInfoByHash map[[sha256.Size]byte]*LogInfo
+
+// LogInfoByKeyHash builds a map of LogInfo objects indexed by their key hashes.
+func LogInfoByKeyHash(ll *loglist3.LogList, hc *http.Client) (LogInfoByHash, error) {
+	return logInfoByKeyHash(ll, hc, NewLogInfo)
+}
+
+func logInfoByKeyHash(ll *loglist3.LogList, hc *http.Client, infoFactory func(*loglist3.Log, *http.Client) (*LogInfo, error)) (map[[sha256.Size]byte]*LogInfo, error) {
+	result := make(map[[sha256.Size]byte]*LogInfo)
+	for _, operator := range ll.Operators {
+		for _, log := range operator.Logs {
+			h := sha256.Sum256(log.Key)
+			li, err := infoFactory(log, hc)
+			if err != nil {
+				return nil, err
+			}
+			result[h] = li
+		}
+	}
+	return result, nil
+}
+
+// LastSTH returns the last STH known for the log.
+func (li *LogInfo) LastSTH() *ct.SignedTreeHead {
+	li.mu.RLock()
+	defer li.mu.RUnlock()
+	return li.lastSTH
+}
+
+// SetSTH sets the last STH known for the log.
+func (li *LogInfo) SetSTH(sth *ct.SignedTreeHead) {
+	li.mu.Lock()
+	defer li.mu.Unlock()
+	li.lastSTH = sth
+}
+
+// VerifySCTSignature checks that the signature in the SCT matches the given
+// leaf (adjusted for the timestamp in the SCT) and log.
+func (li *LogInfo) VerifySCTSignature(sct ct.SignedCertificateTimestamp, leaf ct.MerkleTreeLeaf) error {
+	leaf.TimestampedEntry.Timestamp = sct.Timestamp
+	if err := li.Verifier.VerifySCTSignature(sct, ct.LogEntry{Leaf: leaf}); err != nil {
+		return fmt.Errorf("failed to verify SCT signature from log %q: %v", li.Description, err)
+	}
+	return nil
+}
+
+// VerifyInclusionLatest checks that the given Merkle tree leaf, adjusted for the provided timestamp,
+// is present in the latest known tree size of the log. If no tree size for the log is known, it will
+// be queried. On success, returns the index of the leaf in the log.
+func (li *LogInfo) VerifyInclusionLatest(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp uint64) (int64, error) {
+	sth := li.LastSTH()
+	if sth == nil {
+		var err error
+		sth, err = li.Client.GetSTH(ctx)
+		if err != nil {
+			return -1, fmt.Errorf("failed to get current STH for %q log: %v", li.Description, err)
+		}
+		li.SetSTH(sth)
+	}
+	return li.VerifyInclusionAt(ctx, leaf, timestamp, sth.TreeSize, sth.SHA256RootHash[:])
+}
+
+// VerifyInclusion checks that the given Merkle tree leaf, adjusted for the provided timestamp,
+// is present in the current tree size of the log. On success, returns the index of the leaf
+// in the log.
+func (li *LogInfo) VerifyInclusion(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp uint64) (int64, error) {
+	sth, err := li.Client.GetSTH(ctx)
+	if err != nil {
+		return -1, fmt.Errorf("failed to get current STH for %q log: %v", li.Description, err)
+	}
+	li.SetSTH(sth)
+	return li.VerifyInclusionAt(ctx, leaf, timestamp, sth.TreeSize, sth.SHA256RootHash[:])
+}
+
+// VerifyInclusionAt checks that the given Merkle tree leaf, adjusted for the provided timestamp,
+// is present in the given tree size & root hash of the log. On success, returns the index of the
+// leaf in the log.
+func (li *LogInfo) VerifyInclusionAt(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp, treeSize uint64, rootHash []byte) (int64, error) {
+	leaf.TimestampedEntry.Timestamp = timestamp
+	leafHash, err := ct.LeafHashForLeaf(&leaf)
+	if err != nil {
+		return -1, fmt.Errorf("failed to create leaf hash: %v", err)
+	}
+
+	rsp, err := li.Client.GetProofByHash(ctx, leafHash[:], treeSize)
+	if err != nil {
+		return -1, fmt.Errorf("failed to GetProofByHash(sct,size=%d): %v", treeSize, err)
+	}
+
+	if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(rsp.LeafIndex), treeSize, leafHash[:], rsp.AuditPath, rootHash); err != nil {
+		return -1, fmt.Errorf("failed to verify inclusion proof at size %d: %v", treeSize, err)
+	}
+	return rsp.LeafIndex, nil
+}
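// NOTE (editor's sketch, not part of the vendored file): given a
// *loglist3.Log entry `logEntry`, an SCT `sct` and a MerkleTreeLeaf `leaf`
// built for it (e.g. via ct.MerkleTreeLeafForEmbeddedSCT), checking the
// signature and then proving inclusion might look like:
//
//	li, err := ctutil.NewLogInfo(logEntry, http.DefaultClient)
//	if err != nil { /* handle */ }
//	if err := li.VerifySCTSignature(*sct, *leaf); err != nil { /* handle */ }
//	idx, err := li.VerifyInclusionLatest(ctx, *leaf, sct.Timestamp)
//	if err != nil { /* handle */ }
//	fmt.Printf("included at index %d\n", idx)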
diff --git a/vendor/github.com/google/certificate-transparency-go/gossip/minimal/x509ext/x509ext.go b/vendor/github.com/google/certificate-transparency-go/gossip/minimal/x509ext/x509ext.go
new file mode 100644
index 00000000000..540e717d74e
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/gossip/minimal/x509ext/x509ext.go
@@ -0,0 +1,92 @@
+// Copyright 2018 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package x509ext holds extension types and values for minimal gossip.
+package x509ext
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/google/certificate-transparency-go/asn1"
+	"github.com/google/certificate-transparency-go/tls"
+	"github.com/google/certificate-transparency-go/x509"
+
+	ct "github.com/google/certificate-transparency-go"
+)
+
+// OIDExtensionCTSTH is the OID value for an X.509 extension that holds
+// a log STH value.
+// TODO(drysdale): get an official OID value
+var OIDExtensionCTSTH = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 5}
+
+// OIDExtKeyUsageCTMinimalGossip is the OID value for an extended key usage
+// (EKU) that indicates a leaf certificate is used for the validation of STH
+// values from public CT logs.
+// TODO(drysdale): get an official OID value
+var OIDExtKeyUsageCTMinimalGossip = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 6}
+
+// LogSTHInfo is the structure that gets TLS-encoded into the X.509 extension
+// identified by OIDExtensionCTSTH.
+type LogSTHInfo struct {
+	LogURL            []byte   `tls:"maxlen:255"`
+	Version           tls.Enum `tls:"maxval:255"`
+	TreeSize          uint64
+	Timestamp         uint64
+	SHA256RootHash    ct.SHA256Hash
+	TreeHeadSignature ct.DigitallySigned
+}
+
+// LogSTHInfoFromCert retrieves the STH information embedded in a certificate.
+func LogSTHInfoFromCert(cert *x509.Certificate) (*LogSTHInfo, error) {
+	for _, ext := range cert.Extensions {
+		if ext.Id.Equal(OIDExtensionCTSTH) {
+			var sthInfo LogSTHInfo
+			rest, err := tls.Unmarshal(ext.Value, &sthInfo)
+			if err != nil {
+				return nil, fmt.Errorf("failed to unmarshal STH: %v", err)
+			} else if len(rest) > 0 {
+				return nil, fmt.Errorf("trailing data (%d bytes) after STH", len(rest))
+			}
+			return &sthInfo, nil
+		}
+	}
+	return nil, errors.New("no STH extension found")
+}
+
+// HasSTHInfo indicates whether a certificate has embedded STH information.
+func HasSTHInfo(cert *x509.Certificate) bool {
+	for _, ext := range cert.Extensions {
+		if ext.Id.Equal(OIDExtensionCTSTH) {
+			return true
+		}
+	}
+	return false
+}
+
+// STHFromCert retrieves the STH embedded in a certificate; note the returned
+// STH does not have the LogID field filled in.
+func STHFromCert(cert *x509.Certificate) (*ct.SignedTreeHead, error) {
+	sthInfo, err := LogSTHInfoFromCert(cert)
+	if err != nil {
+		return nil, err
+	}
+	return &ct.SignedTreeHead{
+		Version:           ct.Version(sthInfo.Version),
+		TreeSize:          sthInfo.TreeSize,
+		Timestamp:         sthInfo.Timestamp,
+		SHA256RootHash:    sthInfo.SHA256RootHash,
+		TreeHeadSignature: sthInfo.TreeHeadSignature,
+	}, nil
+}
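// NOTE (editor's sketch, not part of the vendored file): extracting the STH
// carried by a gossip certificate `cert`:
//
//	if x509ext.HasSTHInfo(cert) {
//		info, err := x509ext.LogSTHInfoFromCert(cert)
//		if err != nil { /* handle */ }
//		sth, err := x509ext.STHFromCert(cert)
//		if err != nil { /* handle */ }
//		// sth.LogID is not filled in; the source log is identified by
//		// info.LogURL instead.
//		fmt.Printf("STH for %s: tree size %d\n", info.LogURL, sth.TreeSize)
//	}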
diff --git a/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go b/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go
new file mode 100644
index 00000000000..30932f30d1a
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go
@@ -0,0 +1,72 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jsonclient
+
+import (
+	"sync"
+	"time"
+)
+
+type backoff struct {
+	mu         sync.RWMutex
+	multiplier uint
+	notBefore  time.Time
+}
+
+const (
+	// maximum backoff is 2^(maxMultiplier-1) = 128 seconds
+	maxMultiplier = 8
+)
+
+func (b *backoff) set(override *time.Duration) time.Duration {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.notBefore.After(time.Now()) {
+		if override != nil {
+			// If a backoff is already in force but the override would extend
+			// past it, adopt the longer period.
+			notBefore := time.Now().Add(*override)
+			if notBefore.After(b.notBefore) {
+				b.notBefore = notBefore
+			}
+		}
+		return time.Until(b.notBefore)
+	}
+	var wait time.Duration
+	if override != nil {
+		wait = *override
+	} else {
+		if b.multiplier < maxMultiplier {
+			b.multiplier++
+		}
+		wait = time.Second * time.Duration(1<<(b.multiplier-1))
+	}
+	b.notBefore = time.Now().Add(wait)
+	return wait
+}
+
+func (b *backoff) decreaseMultiplier() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.multiplier > 0 {
+		b.multiplier--
+	}
+}
+
+func (b *backoff) until() time.Time {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	return b.notBefore
+}
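// NOTE (editor's sketch, not part of the vendored file): the multiplier above
// grows by one per backoff period, so consecutive failures (with no
// Retry-After hint) wait 1s, 2s, 4s, ... up to 2^(maxMultiplier-1) = 128s.
// While a backoff is in force, an override can only lengthen it:
//
//	var b backoff
//	b.set(nil) // starts a 1s backoff
//	// ...once that expires, the next failure waits 2s...
//	hint := 10 * time.Second
//	b.set(&hint)           // extends the current wait to ~10s
//	b.decreaseMultiplier() // called on success, relaxing future backoffs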
diff --git a/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go b/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go
new file mode 100644
index 00000000000..edb8f919afe
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go
@@ -0,0 +1,336 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package jsonclient provides a simple client for fetching and parsing
+// JSON CT structures from a log.
+package jsonclient
+
+import (
+	"bytes"
+	"context"
+	"crypto"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"math/rand"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	ct "github.com/google/certificate-transparency-go"
+	"github.com/google/certificate-transparency-go/x509"
+)
+
+const maxJitter = 250 * time.Millisecond
+
+type backoffer interface {
+	// set adjusts/increases the current backoff interval (typically on
+	// retryable failure); if the optional parameter is provided, this will
+	// be used as the interval if it is greater than the currently set
+	// interval. Returns the current wait period so that it can be logged
+	// along with any error message.
+	set(*time.Duration) time.Duration
+	// decreaseMultiplier reduces the current backoff multiplier, typically on success.
+	decreaseMultiplier()
+	// until returns the time until which the client should wait before making
+	// a request; it may be in the past, in which case it should be ignored.
+	until() time.Time
+}
+
+// JSONClient provides common functionality for interacting with a JSON server
+// that uses cryptographic signatures.
+type JSONClient struct {
+	uri           string                // the base URI of the server, e.g. https://ct.googleapis.com/pilot
+	httpClient    *http.Client          // used to interact with the server via HTTP
+	Verifier      *ct.SignatureVerifier // nil for no verification (e.g. no public key available)
+	logger        Logger                // interface to use for logging warnings and errors
+	backoff       backoffer             // object used to store and calculate backoff information
+	userAgent     string                // If set, this is sent as the UserAgent header.
+	authorization string                // If set, this is sent as the Authorization header.
+}
+
+// Logger is a simple logging interface used to log internal errors and warnings.
+type Logger interface {
+	// Printf formats and logs a message.
+	Printf(string, ...interface{})
+}
+
+// Options are the options for creating a new JSONClient.
+type Options struct {
+	// Interface to use for logging warnings and errors; if nil, the
+	// standard library log package will be used.
+	Logger Logger
+	// PEM format public key to use for signature verification.
+	PublicKey string
+	// DER format public key to use for signature verification.
+	PublicKeyDER []byte
+	// UserAgent, if set, will be sent as the User-Agent header with each request.
+	UserAgent string
+	// If set, this is sent as the Authorization header with each request.
+	Authorization string
+}
+
+// ParsePublicKey parses and returns the public key contained in opts.
+// If both opts.PublicKey and opts.PublicKeyDER are set, PublicKeyDER is used.
+// If neither is set, nil will be returned.
+func (opts *Options) ParsePublicKey() (crypto.PublicKey, error) {
+	if len(opts.PublicKeyDER) > 0 {
+		return x509.ParsePKIXPublicKey(opts.PublicKeyDER)
+	}
+
+	if opts.PublicKey != "" {
+		pubkey, _ /* keyhash */, rest, err := ct.PublicKeyFromPEM([]byte(opts.PublicKey))
+		if err != nil {
+			return nil, err
+		}
+		if len(rest) > 0 {
+			return nil, errors.New("extra data found after PEM key decoded")
+		}
+		return pubkey, nil
+	}
+
+	return nil, nil
+}
+
+type basicLogger struct{}
+
+func (bl *basicLogger) Printf(msg string, args ...interface{}) {
+	log.Printf(msg, args...)
+}
+
+// RspError represents an error that occurred when processing a response from a server,
+// and also includes key details from the http.Response that triggered the error.
+type RspError struct {
+	Err        error
+	StatusCode int
+	Body       []byte
+}
+
+// Error formats the RspError instance, focusing on the error.
+func (e RspError) Error() string {
+	return e.Err.Error()
+}
+
+// New constructs a new JSONClient instance for the given base URI, using the
+// given http.Client object (if provided) and the Options object.
+// If opts does not specify a public key, signatures will not be verified.
+func New(uri string, hc *http.Client, opts Options) (*JSONClient, error) {
+	pubkey, err := opts.ParsePublicKey()
+	if err != nil {
+		return nil, fmt.Errorf("invalid public key: %v", err)
+	}
+
+	var verifier *ct.SignatureVerifier
+	if pubkey != nil {
+		var err error
+		verifier, err = ct.NewSignatureVerifier(pubkey)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if hc == nil {
+		hc = new(http.Client)
+	}
+	logger := opts.Logger
+	if logger == nil {
+		logger = &basicLogger{}
+	}
+	return &JSONClient{
+		uri:           strings.TrimRight(uri, "/"),
+		httpClient:    hc,
+		Verifier:      verifier,
+		logger:        logger,
+		backoff:       &backoff{},
+		userAgent:     opts.UserAgent,
+		authorization: opts.Authorization,
+	}, nil
+}
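// NOTE (editor's sketch, not part of the vendored file): constructing a
// verifying client. The URL is illustrative and `pemKey` (the log's PEM
// public key) is assumed to be available:
//
//	c, err := jsonclient.New("https://ct.example.com/log", nil, jsonclient.Options{
//		PublicKey: pemKey,
//		UserAgent: "my-monitor/1.0",
//	})
//	if err != nil { /* handle */ }
//	fmt.Println("talking to", c.BaseURI())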
+// BaseURI returns the base URI that the JSONClient makes queries to.
+func (c *JSONClient) BaseURI() string {
+	return c.uri
+}
+
+// GetAndParse makes an HTTP GET call to the given path, and attempts to parse
+// the response as a JSON representation of the rsp structure. Returns the
+// http.Response, the body of the response, and an error (which may be of
+// type RspError if the HTTP response was available). It returns an error
+// if the response status code is not 200 OK.
+func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[string]string, rsp interface{}) (*http.Response, []byte, error) {
+	if ctx == nil {
+		return nil, nil, errors.New("context.Context required")
+	}
+	// Build a GET request with URL-encoded parameters.
+	vals := url.Values{}
+	for k, v := range params {
+		vals.Add(k, v)
+	}
+	fullURI := fmt.Sprintf("%s%s?%s", c.uri, path, vals.Encode())
+	httpReq, err := http.NewRequestWithContext(ctx, http.MethodGet, fullURI, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(c.userAgent) != 0 {
+		httpReq.Header.Set("User-Agent", c.userAgent)
+	}
+	if len(c.authorization) != 0 {
+		httpReq.Header.Add("Authorization", c.authorization)
+	}
+
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return nil, nil, err
+	}
+	body, err := io.ReadAll(httpRsp.Body)
+	if err != nil {
+		return nil, nil, RspError{Err: fmt.Errorf("failed to read response body: %w", err), StatusCode: httpRsp.StatusCode, Body: body}
+	}
+	if err := httpRsp.Body.Close(); err != nil {
+		return nil, nil, RspError{Err: fmt.Errorf("failed to close response body: %w", err), StatusCode: httpRsp.StatusCode, Body: body}
+	}
+	if httpRsp.StatusCode != http.StatusOK {
+		return nil, nil, RspError{Err: fmt.Errorf("got HTTP Status %q", httpRsp.Status), StatusCode: httpRsp.StatusCode, Body: body}
+	}
+
+	if err := json.NewDecoder(bytes.NewReader(body)).Decode(rsp); err != nil {
+		return nil, nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
+	}
+
+	return httpRsp, body, nil
+}
+
+// PostAndParse makes an HTTP POST call to the given path, including the request
+// parameters, and attempts to parse the response as a JSON representation of
+// the rsp structure. Returns the http.Response, the body of the response, and
+// an error (which may be of type RspError if the HTTP response was available).
+// It does NOT return an error if the response status code is not 200 OK.
+func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) {
+	if ctx == nil {
+		return nil, nil, errors.New("context.Context required")
+	}
+	// Build a POST request with JSON body.
+	postBody, err := json.Marshal(req)
+	if err != nil {
+		return nil, nil, err
+	}
+	fullURI := fmt.Sprintf("%s%s", c.uri, path)
+	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURI, bytes.NewReader(postBody))
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(c.userAgent) != 0 {
+		httpReq.Header.Set("User-Agent", c.userAgent)
+	}
+	if len(c.authorization) != 0 {
+		httpReq.Header.Add("Authorization", c.authorization)
+	}
+	httpReq.Header.Set("Content-Type", "application/json")
+
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return nil, nil, err
+	}
+	body, err := io.ReadAll(httpRsp.Body)
+	if err != nil {
+		_ = httpRsp.Body.Close()
+		return nil, nil, err
+	}
+	if err := httpRsp.Body.Close(); err != nil {
+		return nil, nil, err
+	}
+	if httpRsp.Request.Method != http.MethodPost {
+		// https://developer.mozilla.org/en-US/docs/Web/HTTP/Redirections#permanent_redirections
+		return nil, nil, fmt.Errorf("POST request to %q was converted to %s request to %q", fullURI, httpRsp.Request.Method, httpRsp.Request.URL)
+	}
+
+	if httpRsp.StatusCode == http.StatusOK {
+		if err := json.Unmarshal(body, &rsp); err != nil {
+			return nil, nil, RspError{StatusCode: httpRsp.StatusCode, Body: body, Err: err}
+		}
+	}
+	return httpRsp, body, nil
+}
+
+// waitForBackoff blocks until the defined backoff interval or context has
+// expired; if the notBefore time is in the past it returns immediately.
+func (c *JSONClient) waitForBackoff(ctx context.Context) error {
+	dur := time.Until(c.backoff.until().Add(time.Millisecond * time.Duration(rand.Intn(int(maxJitter.Seconds()*1000)))))
+	if dur < 0 {
+		dur = 0
+	}
+	backoffTimer := time.NewTimer(dur)
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-backoffTimer.C:
+	}
+	return nil
+}
+
+// PostAndParseWithRetry makes an HTTP POST call, but retries (with backoff) on
+// retryable errors; the caller should set a deadline on the provided context
+// to prevent infinite retries. Return values are as for PostAndParse.
+func (c *JSONClient) PostAndParseWithRetry(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) {
+	if ctx == nil {
+		return nil, nil, errors.New("context.Context required")
+	}
+	for {
+		httpRsp, body, err := c.PostAndParse(ctx, path, req, rsp)
+		if err != nil {
+			// Don't retry context errors.
+			if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+				return nil, nil, err
+			}
+			wait := c.backoff.set(nil)
+			c.logger.Printf("Request to %s failed, backing-off %s: %s", c.uri, wait, err)
+		} else {
+			switch httpRsp.StatusCode {
+			case http.StatusOK:
+				return httpRsp, body, nil
+			case http.StatusRequestTimeout:
+				// Request timeout; retry immediately.
+				c.logger.Printf("Request to %s timed out, retrying immediately", c.uri)
+			case http.StatusServiceUnavailable:
+				fallthrough
+			case http.StatusTooManyRequests:
+				var backoff *time.Duration
+				// Retry-After may be either a number of seconds as an int or an
+				// RFC 1123 date string (RFC 7231 Section 7.1.3).
+				if retryAfter := httpRsp.Header.Get("Retry-After"); retryAfter != "" {
+					if seconds, err := strconv.Atoi(retryAfter); err == nil {
+						b := time.Duration(seconds) * time.Second
+						backoff = &b
+					} else if date, err := time.Parse(time.RFC1123, retryAfter); err == nil {
+						b := time.Until(date)
+						backoff = &b
+					}
+				}
+				wait := c.backoff.set(backoff)
+				c.logger.Printf("Request to %s failed, backing-off for %s: got HTTP status %s", c.uri, wait, httpRsp.Status)
+			default:
+				return nil, nil, RspError{
+					StatusCode: httpRsp.StatusCode,
+					Body:       body,
+					Err:        fmt.Errorf("got HTTP status %q", httpRsp.Status)}
+			}
+		}
+		if err := c.waitForBackoff(ctx); err != nil {
+			return nil, nil, err
+		}
+	}
+}
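// NOTE (editor's sketch, not part of the vendored file): callers bound the
// retry loop with a context deadline. `addChainReq` and `addChainRsp` are
// hypothetical request/response types for the RFC 6962 add-chain endpoint:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//	var rsp addChainRsp
//	httpRsp, _, err := c.PostAndParseWithRetry(ctx, "/ct/v1/add-chain", addChainReq, &rsp)
//	if err != nil { /* gave up: context expired or non-retryable status */ }
//	_ = httpRsp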
diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go b/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go
new file mode 100644
index 00000000000..9ac54bae912
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go
@@ -0,0 +1,129 @@
+// Copyright 2022 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package loglist3
+
+import (
+	"github.com/google/certificate-transparency-go/x509"
+	"github.com/google/certificate-transparency-go/x509util"
+)
+
+// LogRoots maps Log-URLs (stated at LogList) to the pools of their accepted
+// root-certificates.
+type LogRoots map[string]*x509util.PEMCertPool
+
+// Compatible creates a new LogList containing only Logs matching the temporal,
+// root-acceptance and Log-status conditions.
+func (ll *LogList) Compatible(cert *x509.Certificate, certRoot *x509.Certificate, roots LogRoots) LogList {
+	active := ll.TemporallyCompatible(cert)
+	// Do not check root compatibility if roots are not being provided.
+	if certRoot == nil {
+		return active
+	}
+	return active.RootCompatible(certRoot, roots)
+}
+
+// SelectByStatus creates a new LogList containing only logs with status
+// provided from the original.
+func (ll *LogList) SelectByStatus(lstats []LogStatus) LogList {
+	var active LogList
+	for _, op := range ll.Operators {
+		activeOp := *op
+		activeOp.Logs = []*Log{}
+		for _, l := range op.Logs {
+			for _, lstat := range lstats {
+				if l.State.LogStatus() == lstat {
+					activeOp.Logs = append(activeOp.Logs, l)
+					break
+				}
+			}
+		}
+		if len(activeOp.Logs) > 0 {
+			active.Operators = append(active.Operators, &activeOp)
+		}
+	}
+	return active
+}
+
+// RootCompatible creates a new LogList containing only the logs of original
+// LogList that are compatible with the provided cert, according to
+// the passed in collection of per-log roots. Logs that are missing from
+// the collection are treated as always compatible and included, even if
+// an empty cert root is passed in.
+// The cert root, when provided, is expected to be a CA cert.
+func (ll *LogList) RootCompatible(certRoot *x509.Certificate, roots LogRoots) LogList {
+	var compatible LogList
+
+	// Check whether root is a CA-cert.
+	if certRoot != nil && !certRoot.IsCA {
+		// RootCompatible expects a fully rooted chain, but the provided
+		// terminal cert is not a root, so nothing is treated as compatible.
+		return compatible
+	}
+
+	for _, op := range ll.Operators {
+		compatibleOp := *op
+		compatibleOp.Logs = []*Log{}
+		for _, l := range op.Logs {
+			// If root set is not defined, we treat Log as compatible assuming no
+			// knowledge of its roots.
+			if _, ok := roots[l.URL]; !ok {
+				compatibleOp.Logs = append(compatibleOp.Logs, l)
+				continue
+			}
+
+			if certRoot == nil {
+				continue
+			}
+
+			// Check root is accepted.
+			if roots[l.URL].Included(certRoot) {
+				compatibleOp.Logs = append(compatibleOp.Logs, l)
+			}
+		}
+		if len(compatibleOp.Logs) > 0 {
+			compatible.Operators = append(compatible.Operators, &compatibleOp)
+		}
+	}
+	return compatible
+}
+
+// TemporallyCompatible creates a new LogList containing only the logs of
+// original LogList that are compatible with the provided cert, according to
+// NotAfter and TemporalInterval matching.
+// Returns an empty LogList if a nil cert is provided.
+func (ll *LogList) TemporallyCompatible(cert *x509.Certificate) LogList {
+	var compatible LogList
+	if cert == nil {
+		return compatible
+	}
+
+	for _, op := range ll.Operators {
+		compatibleOp := *op
+		compatibleOp.Logs = []*Log{}
+		for _, l := range op.Logs {
+			if l.TemporalInterval == nil {
+				compatibleOp.Logs = append(compatibleOp.Logs, l)
+				continue
+			}
+			if cert.NotAfter.Before(l.TemporalInterval.EndExclusive) && (cert.NotAfter.After(l.TemporalInterval.StartInclusive) || cert.NotAfter.Equal(l.TemporalInterval.StartInclusive)) {
+				compatibleOp.Logs = append(compatibleOp.Logs, l)
+			}
+		}
+		if len(compatibleOp.Logs) > 0 {
+			compatible.Operators = append(compatible.Operators, &compatibleOp)
+		}
+	}
+	return compatible
+}
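// NOTE (editor's sketch, not part of the vendored file): a typical selection
// pipeline narrows a list to usable logs that could accept a certificate.
// `ll`, `cert`, `issuerRoot` and `rootsByLog` are assumed inputs:
//
//	usable := ll.SelectByStatus([]loglist3.LogStatus{loglist3.UsableLogStatus})
//	candidates := usable.Compatible(cert, issuerRoot, rootsByLog)
//	for _, op := range candidates.Operators {
//		for _, l := range op.Logs {
//			fmt.Println("candidate log:", l.URL)
//		}
//	}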
diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go b/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go
new file mode 100644
index 00000000000..c5e94f1874f
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go
@@ -0,0 +1,427 @@
+// Copyright 2022 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package loglist3 allows parsing and searching of the master CT Log list.
+// It expects the log list to conform to the v3 schema.
+package loglist3
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+	"time"
+	"unicode"
+
+	"github.com/google/certificate-transparency-go/tls"
+)
+
+const (
+	// LogListURL has the master URL for Google Chrome's log list.
+	LogListURL = "https://www.gstatic.com/ct/log_list/v3/log_list.json"
+	// LogListSignatureURL has the URL for the signature over Google Chrome's log list.
+	LogListSignatureURL = "https://www.gstatic.com/ct/log_list/v3/log_list.sig"
+	// AllLogListURL has the URL for the list of all known logs.
+	AllLogListURL = "https://www.gstatic.com/ct/log_list/v3/all_logs_list.json"
+	// AllLogListSignatureURL has the URL for the signature over the list of all known logs.
+	AllLogListSignatureURL = "https://www.gstatic.com/ct/log_list/v3/all_logs_list.sig"
+)
+
+// Manually mapped from https://www.gstatic.com/ct/log_list/v3/log_list_schema.json
+
+// LogList holds a collection of CT logs, grouped by operator.
+type LogList struct {
+	// IsAllLogs is set to true if the list contains all known logs, not
+	// only usable ones.
+	IsAllLogs bool `json:"is_all_logs,omitempty"`
+	// Version is the version of the log list.
+	Version string `json:"version,omitempty"`
+	// LogListTimestamp is the time at which the log list was published.
+	LogListTimestamp time.Time `json:"log_list_timestamp,omitempty"`
+	// Operators is a list of CT log operators and the logs they operate.
+	Operators []*Operator `json:"operators"`
+}
+
+// Operator holds a collection of CT logs run by the same organisation.
+// It also provides information about that organisation, e.g. contact details.
+type Operator struct {
+	// Name is the name of the CT log operator.
+	Name string `json:"name"`
+	// Email lists the email addresses that can be used to contact this log
+	// operator.
+	Email []string `json:"email"`
+	// Logs is a list of RFC 6962 CT logs run by this operator.
+	Logs []*Log `json:"logs"`
+	// TiledLogs is a list of Static CT API CT logs run by this operator.
+	TiledLogs []*TiledLog `json:"tiled_logs"`
+}
+
+// Log describes a single RFC 6962 CT log. It is nearly the same as the TiledLog struct,
+// but has a single URL field instead of SubmissionURL and MonitoringURL fields.
+type Log struct {
+	// Description is a human-readable string that describes the log.
+	Description string `json:"description,omitempty"`
+	// LogID is the SHA-256 hash of the log's public key.
+	LogID []byte `json:"log_id"`
+	// Key is the public key with which signatures can be verified.
+	Key []byte `json:"key"`
+	// URL is the address of the HTTPS API.
+	URL string `json:"url"`
+	// DNS is the address of the DNS API.
+	DNS string `json:"dns,omitempty"`
+	// MMD is the Maximum Merge Delay, in seconds. All submitted
+	// certificates must be incorporated into the log within this time.
+	MMD int32 `json:"mmd"`
+	// PreviousOperators is a list of previous operators and the timestamp
+	// of when they stopped running the log.
+	PreviousOperators []*PreviousOperator `json:"previous_operators,omitempty"`
+	// State is the current state of the log, from the perspective of the
+	// log list distributor.
+	State *LogStates `json:"state,omitempty"`
+	// TemporalInterval, if set, indicates that this log only accepts
+	// certificates with a NotAfter date in this time range.
+	TemporalInterval *TemporalInterval `json:"temporal_interval,omitempty"`
+	// Type indicates the purpose of this log, e.g. "test" or "prod".
+	Type string `json:"log_type,omitempty"`
+}
+
+// TiledLog describes a Static CT API log. It is nearly the same as the Log struct,
+// but has both SubmissionURL and MonitoringURL fields instead of a single URL field.
+type TiledLog struct {
+	// Description is a human-readable string that describes the log.
+	Description string `json:"description,omitempty"`
+	// LogID is the SHA-256 hash of the log's public key.
+	LogID []byte `json:"log_id"`
+	// Key is the public key with which signatures can be verified.
+	Key []byte `json:"key"`
+	// SubmissionURL is the address of the write (submission) API.
+	SubmissionURL string `json:"submission_url"`
+	// MonitoringURL is the address of the read (monitoring) API.
+	MonitoringURL string `json:"monitoring_url"`
+	// DNS is the address of the DNS API.
+	DNS string `json:"dns,omitempty"`
+	// MMD is the Maximum Merge Delay, in seconds. All submitted
+	// certificates must be incorporated into the log within this time.
+	MMD int32 `json:"mmd"`
+	// PreviousOperators is a list of previous operators and the timestamp
+	// of when they stopped running the log.
+	PreviousOperators []*PreviousOperator `json:"previous_operators,omitempty"`
+	// State is the current state of the log, from the perspective of the
+	// log list distributor.
+	State *LogStates `json:"state,omitempty"`
+	// TemporalInterval, if set, indicates that this log only accepts
+	// certificates with a NotAfter date in this time range.
+	TemporalInterval *TemporalInterval `json:"temporal_interval,omitempty"`
+	// Type indicates the purpose of this log, e.g. "test" or "prod".
+	Type string `json:"log_type,omitempty"`
+}
+
+// PreviousOperator holds information about a log operator and the time at which
+// they stopped running a log.
+type PreviousOperator struct {
+	// Name is the name of the CT log operator.
+	Name string `json:"name"`
+	// EndTime is the time at which the operator stopped running a log.
+	EndTime time.Time `json:"end_time"`
+}
+
+// TemporalInterval is a time range.
+type TemporalInterval struct {
+	// StartInclusive is the beginning of the time range.
+	StartInclusive time.Time `json:"start_inclusive"`
+	// EndExclusive is just after the end of the time range.
+	EndExclusive time.Time `json:"end_exclusive"`
+}
+
+// LogStatus indicates Log status.
+type LogStatus int
+
+// LogStatus values
+const (
+	UndefinedLogStatus LogStatus = iota
+	PendingLogStatus
+	QualifiedLogStatus
+	UsableLogStatus
+	ReadOnlyLogStatus
+	RetiredLogStatus
+	RejectedLogStatus
+)
+
+//go:generate stringer -type=LogStatus
+
+// LogStates are the states that a CT log can be in, from the perspective of a
+// user agent. Only one should be set - this is the current state.
+type LogStates struct {
+	// Pending indicates that the log is in the "pending" state.
+	Pending *LogState `json:"pending,omitempty"`
+	// Qualified indicates that the log is in the "qualified" state.
+	Qualified *LogState `json:"qualified,omitempty"`
+	// Usable indicates that the log is in the "usable" state.
+	Usable *LogState `json:"usable,omitempty"`
+	// ReadOnly indicates that the log is in the "readonly" state.
+	ReadOnly *ReadOnlyLogState `json:"readonly,omitempty"`
+	// Retired indicates that the log is in the "retired" state.
+	Retired *LogState `json:"retired,omitempty"`
+	// Rejected indicates that the log is in the "rejected" state.
+	Rejected *LogState `json:"rejected,omitempty"`
+}
+
+// LogState contains details on the current state of a CT log.
+type LogState struct {
+	// Timestamp is the time when the state began.
+	Timestamp time.Time `json:"timestamp"`
+}
+
+// ReadOnlyLogState contains details on the current state of a read-only CT log.
+type ReadOnlyLogState struct {
+	LogState
+	// FinalTreeHead is the root hash and tree size at which the CT log was
+	// made read-only. This should never change while the log is read-only.
+	FinalTreeHead TreeHead `json:"final_tree_head"`
+}
+
+// TreeHead is the root hash and tree size of a CT log.
+type TreeHead struct {
+	// SHA256RootHash is the root hash of the CT log's Merkle tree.
+	SHA256RootHash []byte `json:"sha256_root_hash"`
+	// TreeSize is the size of the CT log's Merkle tree.
+	TreeSize int64 `json:"tree_size"`
+}
+
+// LogStatus returns the LogStatus enum value corresponding to the state that
+// is set in the descriptive struct.
+func (ls *LogStates) LogStatus() LogStatus {
+	switch {
+	case ls == nil:
+		return UndefinedLogStatus
+	case ls.Pending != nil:
+		return PendingLogStatus
+	case ls.Qualified != nil:
+		return QualifiedLogStatus
+	case ls.Usable != nil:
+		return UsableLogStatus
+	case ls.ReadOnly != nil:
+		return ReadOnlyLogStatus
+	case ls.Retired != nil:
+		return RetiredLogStatus
+	case ls.Rejected != nil:
+		return RejectedLogStatus
+	default:
+		return UndefinedLogStatus
+	}
+}
+
+// String returns the printable name of the state.
+func (ls *LogStates) String() string {
+	return ls.LogStatus().String()
+}
+
+// Active picks the state that is set. If multiple states are set (not
+// expected), it picks one of them.
+func (ls *LogStates) Active() (*LogState, *ReadOnlyLogState) {
+	if ls == nil {
+		return nil, nil
+	}
+	switch {
+	case ls.Pending != nil:
+		return ls.Pending, nil
+	case ls.Qualified != nil:
+		return ls.Qualified, nil
+	case ls.Usable != nil:
+		return ls.Usable, nil
+	case ls.ReadOnly != nil:
+		return nil, ls.ReadOnly
+	case ls.Retired != nil:
+		return ls.Retired, nil
+	case ls.Rejected != nil:
+		return ls.Rejected, nil
+	default:
+		return nil, nil
+	}
+}
+
+// GoogleOperated returns whether Operator is considered to be Google.
+func (op *Operator) GoogleOperated() bool {
+	for _, email := range op.Email {
+		if strings.Contains(email, "google-ct-logs@googlegroups") {
+			return true
+		}
+	}
+	return false
+}
+
+// NewFromJSON creates a LogList from JSON encoded data.
+func NewFromJSON(llData []byte) (*LogList, error) {
+	var ll LogList
+	if err := json.Unmarshal(llData, &ll); err != nil {
+		return nil, fmt.Errorf("failed to parse log list: %v", err)
+	}
+	return &ll, nil
+}
+
+// NewFromSignedJSON creates a LogList from JSON encoded data, checking a
+// signature along the way. The signature data should be provided as the
+// raw signature data.
+func NewFromSignedJSON(llData, rawSig []byte, pubKey crypto.PublicKey) (*LogList, error) {
+	var sigAlgo tls.SignatureAlgorithm
+	switch pkType := pubKey.(type) {
+	case *rsa.PublicKey:
+		sigAlgo = tls.RSA
+	case *ecdsa.PublicKey:
+		sigAlgo = tls.ECDSA
+	default:
+		return nil, fmt.Errorf("unsupported public key type %v", pkType)
+	}
+	tlsSig := tls.DigitallySigned{
+		Algorithm: tls.SignatureAndHashAlgorithm{
+			Hash:      tls.SHA256,
+			Signature: sigAlgo,
+		},
+		Signature: rawSig,
+	}
+	if err := tls.VerifySignature(pubKey, llData, tlsSig); err != nil {
+		return nil, fmt.Errorf("failed to verify signature: %v", err)
+	}
+	return NewFromJSON(llData)
+}
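// NOTE (editor's sketch, not part of the vendored file): fetching and
// verifying the signed list, then searching it. `fetch` is a stand-in for an
// HTTP GET of the two URLs above, and `listPubKey` (the log list's public
// key) is assumed to be available:
//
//	llData, _ := fetch(loglist3.LogListURL)
//	sig, _ := fetch(loglist3.LogListSignatureURL)
//	ll, err := loglist3.NewFromSignedJSON(llData, sig, listPubKey)
//	if err != nil { /* handle */ }
//	for _, l := range ll.FuzzyFindLog("pilot") {
//		fmt.Println(l.Description, l.URL)
//	}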
+// FindLogByName returns all logs whose names contain the given string.
+func (ll *LogList) FindLogByName(name string) []*Log {
+	name = strings.ToLower(name)
+	var results []*Log
+	for _, op := range ll.Operators {
+		for _, log := range op.Logs {
+			if strings.Contains(strings.ToLower(log.Description), name) {
+				results = append(results, log)
+			}
+		}
+	}
+	return results
+}
+
+// FindLogByURL finds the log with the given URL.
+func (ll *LogList) FindLogByURL(url string) *Log {
+	for _, op := range ll.Operators {
+		for _, log := range op.Logs {
+			// Don't count trailing slashes
+			if strings.TrimRight(log.URL, "/") == strings.TrimRight(url, "/") {
+				return log
+			}
+		}
+	}
+	return nil
+}
+
+// FindLogByKeyHash finds the log with the given key hash.
+func (ll *LogList) FindLogByKeyHash(keyhash [sha256.Size]byte) *Log {
+	for _, op := range ll.Operators {
+		for _, log := range op.Logs {
+			if bytes.Equal(log.LogID, keyhash[:]) {
+				return log
+			}
+		}
+	}
+	return nil
+}
+
+// FindLogByKeyHashPrefix finds all logs whose key hash starts with the prefix.
+func (ll *LogList) FindLogByKeyHashPrefix(prefix string) []*Log {
+	var results []*Log
+	for _, op := range ll.Operators {
+		for _, log := range op.Logs {
+			hh := hex.EncodeToString(log.LogID[:])
+			if strings.HasPrefix(hh, prefix) {
+				results = append(results, log)
+			}
+		}
+	}
+	return results
+}
+
+// FindLogByKey finds the log with the given DER-encoded key.
+func (ll *LogList) FindLogByKey(key []byte) *Log {
+	for _, op := range ll.Operators {
+		for _, log := range op.Logs {
+			if bytes.Equal(log.Key[:], key) {
+				return log
+			}
+		}
+	}
+	return nil
+}
+
+var hexDigits = regexp.MustCompile("^[0-9a-fA-F]+$")
+
+// FuzzyFindLog tries to find logs that match the given input, whose format
+// is unspecified. This generally returns a single log, but if text input
+// that matches multiple log descriptions is provided, then multiple logs may
+// be returned.
+func (ll *LogList) FuzzyFindLog(input string) []*Log {
+	input = strings.Trim(input, " \t")
+	if logs := ll.FindLogByName(input); len(logs) > 0 {
+		return logs
+	}
+	if log := ll.FindLogByURL(input); log != nil {
+		return []*Log{log}
+	}
+	// Try assuming the input is binary data of some form. First base64:
+	if data, err := base64.StdEncoding.DecodeString(input); err == nil {
+		if len(data) == sha256.Size {
+			var hash [sha256.Size]byte
+			copy(hash[:], data)
+			if log := ll.FindLogByKeyHash(hash); log != nil {
+				return []*Log{log}
+			}
+		}
+		if log := ll.FindLogByKey(data); log != nil {
+			return []*Log{log}
+		}
+	}
+	// Now hex, but strip all internal whitespace first.
+	input = stripInternalSpace(input)
+	if data, err := hex.DecodeString(input); err == nil {
+		if len(data) == sha256.Size {
+			var hash [sha256.Size]byte
+			copy(hash[:], data)
+			if log := ll.FindLogByKeyHash(hash); log != nil {
+				return []*Log{log}
+			}
+		}
+		if log := ll.FindLogByKey(data); log != nil {
+			return []*Log{log}
+		}
+	}
+	// Finally, allow hex strings with an odd number of digits.
+	if hexDigits.MatchString(input) {
+		if logs := ll.FindLogByKeyHashPrefix(input); len(logs) > 0 {
+			return logs
+		}
+	}
+
+	return nil
+}
+
+func stripInternalSpace(input string) string {
+	return strings.Map(func(r rune) rune {
+		if !unicode.IsSpace(r) {
+			return r
+		}
+		return -1
+	}, input)
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go b/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go
new file mode 100644
index 00000000000..84c7bbdf4e4
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go
@@ -0,0 +1,29 @@
+// Code generated by "stringer -type=LogStatus"; DO NOT EDIT.
+
+package loglist3
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[UndefinedLogStatus-0]
+	_ = x[PendingLogStatus-1]
+	_ = x[QualifiedLogStatus-2]
+	_ = x[UsableLogStatus-3]
+	_ = x[ReadOnlyLogStatus-4]
+	_ = x[RetiredLogStatus-5]
+	_ = x[RejectedLogStatus-6]
+}
+
+const _LogStatus_name = "UndefinedLogStatusPendingLogStatusQualifiedLogStatusUsableLogStatusReadOnlyLogStatusRetiredLogStatusRejectedLogStatus"
+
+var _LogStatus_index = [...]uint8{0, 18, 34, 52, 67, 84, 100, 117}
+
+func (i LogStatus) String() string {
+	if i < 0 || i >= LogStatus(len(_LogStatus_index)-1) {
+		return "LogStatus(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _LogStatus_name[_LogStatus_index[i]:_LogStatus_index[i+1]]
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/proto_gen.go b/vendor/github.com/google/certificate-transparency-go/proto_gen.go
new file mode 100644
index 00000000000..565c6bbbc82
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/proto_gen.go
@@ -0,0 +1,25 @@
+// Copyright 2021 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ct
+
+// We do the protoc generation here (rather than in the individual directories)
+// in order to work around the newly-enforced rule that all protobuf file "names"
+// must be unique.
+// See https://developers.google.com/protocol-buffers/docs/proto#packages and
+// https://github.com/golang/protobuf/issues/1122
+
+//go:generate sh -c "protoc -I=. -I$(go list -f '{{ .Dir }}' github.com/google/trillian) -I$(go list -f '{{ .Dir }}' github.com/google/certificate-transparency-go) --go_out=paths=source_relative:. trillian/ctfe/configpb/config.proto"
+//go:generate sh -c "protoc -I=. -I$(go list -f '{{ .Dir }}' github.com/google/trillian) -I$(go list -f '{{ .Dir }}' github.com/google/certificate-transparency-go) --go_out=paths=source_relative:. trillian/migrillian/configpb/config.proto"
+//go:generate sh -c "protoc -I=. -I$(go list -f '{{ .Dir }}' github.com/google/certificate-transparency-go) --go_out=paths=source_relative:. client/configpb/multilog.proto"
diff --git a/vendor/github.com/google/certificate-transparency-go/serialization.go b/vendor/github.com/google/certificate-transparency-go/serialization.go
new file mode 100644
index 00000000000..2a6c21ed4cf
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/serialization.go
@@ -0,0 +1,317 @@
+// Copyright 2015 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ct
+
+import (
+	"crypto"
+	"crypto/sha256"
+	"fmt"
+	"time"
+
+	"github.com/google/certificate-transparency-go/tls"
+	"github.com/google/certificate-transparency-go/x509"
+)
+
+// SerializeSCTSignatureInput serializes the passed in sct and log entry into
+// the correct format for signing.
+func SerializeSCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
+	switch sct.SCTVersion {
+	case V1:
+		input := CertificateTimestamp{
+			SCTVersion:    sct.SCTVersion,
+			SignatureType: CertificateTimestampSignatureType,
+			Timestamp:     sct.Timestamp,
+			EntryType:     entry.Leaf.TimestampedEntry.EntryType,
+			Extensions:    sct.Extensions,
+		}
+		switch entry.Leaf.TimestampedEntry.EntryType {
+		case X509LogEntryType:
+			input.X509Entry = entry.Leaf.TimestampedEntry.X509Entry
+		case PrecertLogEntryType:
+			input.PrecertEntry = &PreCert{
+				IssuerKeyHash:  entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
+				TBSCertificate: entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate,
+			}
+		default:
+			return nil, fmt.Errorf("unsupported entry type %s", entry.Leaf.TimestampedEntry.EntryType)
+		}
+		return tls.Marshal(input)
+	default:
+		return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
+	}
+}
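// NOTE (editor's sketch, not part of the vendored file): reproducing the bytes
// that a log signed for an SCT, given `sct` and a `leaf` built by one of the
// MerkleTreeLeaf helpers below; the log's signature in sct.Signature covers
// exactly these bytes:
//
//	data, err := ct.SerializeSCTSignatureInput(*sct, ct.LogEntry{Leaf: *leaf})
//	if err != nil { /* handle */ }
//	_ = data // feed to a signature check against the log's public key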
+// SerializeSTHSignatureInput serializes the passed in STH into the correct
+// format for signing.
+func SerializeSTHSignatureInput(sth SignedTreeHead) ([]byte, error) {
+	switch sth.Version {
+	case V1:
+		if len(sth.SHA256RootHash) != crypto.SHA256.Size() {
+			return nil, fmt.Errorf("invalid TreeHash length, got %d expected %d", len(sth.SHA256RootHash), crypto.SHA256.Size())
+		}
+
+		input := TreeHeadSignature{
+			Version:        sth.Version,
+			SignatureType:  TreeHashSignatureType,
+			Timestamp:      sth.Timestamp,
+			TreeSize:       sth.TreeSize,
+			SHA256RootHash: sth.SHA256RootHash,
+		}
+		return tls.Marshal(input)
+	default:
+		return nil, fmt.Errorf("unsupported STH version %d", sth.Version)
+	}
+}
+
+// CreateX509MerkleTreeLeaf generates a MerkleTreeLeaf for an X509 cert.
+func CreateX509MerkleTreeLeaf(cert ASN1Cert, timestamp uint64) *MerkleTreeLeaf {
+	return &MerkleTreeLeaf{
+		Version:  V1,
+		LeafType: TimestampedEntryLeafType,
+		TimestampedEntry: &TimestampedEntry{
+			Timestamp: timestamp,
+			EntryType: X509LogEntryType,
+			X509Entry: &cert,
+		},
+	}
+}
+
+// MerkleTreeLeafFromRawChain generates a MerkleTreeLeaf from a chain (in DER-encoded form) and timestamp.
+func MerkleTreeLeafFromRawChain(rawChain []ASN1Cert, etype LogEntryType, timestamp uint64) (*MerkleTreeLeaf, error) {
+	// Need at most 3 of the chain
+	count := 3
+	if count > len(rawChain) {
+		count = len(rawChain)
+	}
+	chain := make([]*x509.Certificate, count)
+	for i := range chain {
+		cert, err := x509.ParseCertificate(rawChain[i].Data)
+		if x509.IsFatal(err) {
+			return nil, fmt.Errorf("failed to parse chain[%d] cert: %v", i, err)
+		}
+		chain[i] = cert
+	}
+	return MerkleTreeLeafFromChain(chain, etype, timestamp)
+}
+
+// MerkleTreeLeafFromChain generates a MerkleTreeLeaf from a chain and timestamp.
+func MerkleTreeLeafFromChain(chain []*x509.Certificate, etype LogEntryType, timestamp uint64) (*MerkleTreeLeaf, error) {
+	leaf := MerkleTreeLeaf{
+		Version:  V1,
+		LeafType: TimestampedEntryLeafType,
+		TimestampedEntry: &TimestampedEntry{
+			EntryType: etype,
+			Timestamp: timestamp,
+		},
+	}
+	if etype == X509LogEntryType {
+		leaf.TimestampedEntry.X509Entry = &ASN1Cert{Data: chain[0].Raw}
+		return &leaf, nil
+	}
+	if etype != PrecertLogEntryType {
+		return nil, fmt.Errorf("unknown LogEntryType %d", etype)
+	}
+
+	// Pre-certs are more complicated. First, parse the leaf pre-cert and its
+	// putative issuer.
+	if len(chain) < 2 {
+		return nil, fmt.Errorf("no issuer cert available for precert leaf building")
+	}
+	issuer := chain[1]
+	cert := chain[0]
+
+	var preIssuer *x509.Certificate
+	if IsPreIssuer(issuer) {
+		// Replace the cert's issuance information with details from the pre-issuer.
+		preIssuer = issuer
+
+		// The issuer of the pre-cert is not going to be the issuer of the final
+		// cert. Change to use the final issuer's key hash.
+		if len(chain) < 3 {
+			return nil, fmt.Errorf("no issuer cert available for pre-issuer")
+		}
+		issuer = chain[2]
+	}
+
+	// Next, post-process the DER-encoded TBSCertificate, to remove the CT poison
+	// extension and possibly update the issuer field.
+	defangedTBS, err := x509.BuildPrecertTBS(cert.RawTBSCertificate, preIssuer)
+	if err != nil {
+		return nil, fmt.Errorf("failed to remove poison extension: %v", err)
+	}
+
+	leaf.TimestampedEntry.EntryType = PrecertLogEntryType
+	leaf.TimestampedEntry.PrecertEntry = &PreCert{
+		IssuerKeyHash:  sha256.Sum256(issuer.RawSubjectPublicKeyInfo),
+		TBSCertificate: defangedTBS,
+	}
+	return &leaf, nil
+}
+
+// MerkleTreeLeafForEmbeddedSCT generates a MerkleTreeLeaf from a chain and an
+// SCT timestamp, where the leaf certificate at chain[0] is a certificate that
+// contains embedded SCTs. It is assumed that the timestamp provided is from
+// one of the SCTs embedded within the leaf certificate.
+func MerkleTreeLeafForEmbeddedSCT(chain []*x509.Certificate, timestamp uint64) (*MerkleTreeLeaf, error) {
+	// For building the leaf for a certificate and SCT where the SCT is embedded
+	// in the certificate, we need to build the original precertificate TBS
+	// data. First, parse the leaf cert and its issuer.
+	if len(chain) < 2 {
+		return nil, fmt.Errorf("no issuer cert available for precert leaf building")
+	}
+	issuer := chain[1]
+	cert := chain[0]
+
+	// Next, post-process the DER-encoded TBSCertificate, to remove the SCTList
+	// extension.
+	tbs, err := x509.RemoveSCTList(cert.RawTBSCertificate)
+	if err != nil {
+		return nil, fmt.Errorf("failed to remove SCT List extension: %v", err)
+	}
+
+	return &MerkleTreeLeaf{
+		Version:  V1,
+		LeafType: TimestampedEntryLeafType,
+		TimestampedEntry: &TimestampedEntry{
+			EntryType: PrecertLogEntryType,
+			Timestamp: timestamp,
+			PrecertEntry: &PreCert{
+				IssuerKeyHash:  sha256.Sum256(issuer.RawSubjectPublicKeyInfo),
+				TBSCertificate: tbs,
+			},
+		},
+	}, nil
+}
+
+// LeafHashForLeaf returns the leaf hash for a Merkle tree leaf.
+func LeafHashForLeaf(leaf *MerkleTreeLeaf) ([sha256.Size]byte, error) {
+	leafData, err := tls.Marshal(*leaf)
+	if err != nil {
+		return [sha256.Size]byte{}, fmt.Errorf("failed to tls-encode MerkleTreeLeaf: %s", err)
+	}
+
+	data := append([]byte{TreeLeafPrefix}, leafData...)
+	leafHash := sha256.Sum256(data)
+	return leafHash, nil
+}
+
+// IsPreIssuer indicates whether a certificate is a pre-cert issuer with the specific
+// certificate transparency extended key usage.
+func IsPreIssuer(issuer *x509.Certificate) bool {
+	for _, eku := range issuer.ExtKeyUsage {
+		if eku == x509.ExtKeyUsageCertificateTransparency {
+			return true
+		}
+	}
+	return false
+}
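// NOTE (editor's sketch, not part of the vendored file): rebuilding the logged
// precert leaf for a final certificate with embedded SCTs, then hashing it.
// `chain` (leaf plus issuer) and an embedded `sct` are assumed:
//
//	leaf, err := ct.MerkleTreeLeafForEmbeddedSCT(chain, sct.Timestamp)
//	if err != nil { /* handle */ }
//	hash, err := ct.LeafHashForLeaf(leaf)
//	if err != nil { /* handle */ }
//	fmt.Printf("leaf hash: %x\n", hash)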
+func RawLogEntryFromLeaf(index int64, entry *LeafEntry) (*RawLogEntry, error) { + ret := RawLogEntry{Index: index} + if rest, err := tls.Unmarshal(entry.LeafInput, &ret.Leaf); err != nil { + return nil, fmt.Errorf("failed to unmarshal MerkleTreeLeaf: %v", err) + } else if len(rest) > 0 { + return nil, fmt.Errorf("MerkleTreeLeaf: trailing data %d bytes", len(rest)) + } + + switch eType := ret.Leaf.TimestampedEntry.EntryType; eType { + case X509LogEntryType: + var certChain CertificateChain + if rest, err := tls.Unmarshal(entry.ExtraData, &certChain); err != nil { + return nil, fmt.Errorf("failed to unmarshal CertificateChain: %v", err) + } else if len(rest) > 0 { + return nil, fmt.Errorf("CertificateChain: trailing data %d bytes", len(rest)) + } + ret.Cert = *ret.Leaf.TimestampedEntry.X509Entry + ret.Chain = certChain.Entries + + case PrecertLogEntryType: + var precertChain PrecertChainEntry + if rest, err := tls.Unmarshal(entry.ExtraData, &precertChain); err != nil { + return nil, fmt.Errorf("failed to unmarshal PrecertChainEntry: %v", err) + } else if len(rest) > 0 { + return nil, fmt.Errorf("PrecertChainEntry: trailing data %d bytes", len(rest)) + } + ret.Cert = precertChain.PreCertificate + ret.Chain = precertChain.CertificateChain + + default: + // TODO(pavelkalinnikov): Section 4.6 of RFC6962 implies that unknown types + // are not errors. We should revisit how we process this case. + return nil, fmt.Errorf("unknown entry type: %v", eType) + } + + return &ret, nil +} + +// ToLogEntry converts RawLogEntry to a LogEntry, which includes an x509-parsed +// (pre-)certificate. +// +// Note that this function may return a valid LogEntry object and a non-nil +// error value, when the error indicates a non-fatal parsing error. +func (rle *RawLogEntry) ToLogEntry() (*LogEntry, error) { + var err error + entry := LogEntry{Index: rle.Index, Leaf: rle.Leaf, Chain: rle.Chain} + + switch eType := rle.Leaf.TimestampedEntry.EntryType; eType { + case X509LogEntryType: + entry.X509Cert, err = rle.Leaf.X509Certificate() + if x509.IsFatal(err) { + return nil, fmt.Errorf("failed to parse certificate: %v", err) + } + + case PrecertLogEntryType: + var tbsCert *x509.Certificate + tbsCert, err = rle.Leaf.Precertificate() + if x509.IsFatal(err) { + return nil, fmt.Errorf("failed to parse precertificate: %v", err) + } + entry.Precert = &Precertificate{ + Submitted: rle.Cert, + IssuerKeyHash: rle.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash, + TBSCertificate: tbsCert, + } + + default: + return nil, fmt.Errorf("unknown entry type: %v", eType) + } + + // err may be non-nil for a non-fatal error. + return &entry, err +} + +// LogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data +// after JSON parsing) into a LogEntry object (which includes x509.Certificate +// objects, after TLS and ASN.1 parsing). +// +// Note that this function may return a valid LogEntry object and a non-nil +// error value, when the error indicates a non-fatal parsing error. +func LogEntryFromLeaf(index int64, leaf *LeafEntry) (*LogEntry, error) { + rle, err := RawLogEntryFromLeaf(index, leaf) + if err != nil { + return nil, err + } + return rle.ToLogEntry() +} + +// TimestampToTime converts a timestamp in the style of RFC 6962 (milliseconds +// since UNIX epoch) to a Go Time. 
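The fatal/non-fatal error contract of LogEntryFromLeaf and ToLogEntry above is easy to misuse: a non-nil error does not necessarily mean the entry is unusable, and the nil-entry check is what distinguishes the two cases. A hedged sketch of consuming get-entries leaves with that contract in mind (the entries slice and start index are assumed to come from a CT log client):

package example

import (
	"log"

	ct "github.com/google/certificate-transparency-go"
)

// parseEntries converts raw get-entries leaves into parsed LogEntry values,
// honouring the fatal/non-fatal distinction described above.
func parseEntries(start int64, entries []ct.LeafEntry) []*ct.LogEntry {
	var out []*ct.LogEntry
	for i := range entries {
		entry, err := ct.LogEntryFromLeaf(start+int64(i), &entries[i])
		if entry == nil {
			// Fatal: the leaf or its chain could not be parsed at all.
			log.Printf("skipping entry %d: %v", start+int64(i), err)
			continue
		}
		if err != nil {
			// Non-fatal: entry is still usable, e.g. its cert has ASN.1 quirks.
			log.Printf("non-fatal issue at entry %d: %v", start+int64(i), err)
		}
		out = append(out, entry)
	}
	return out
}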
+func TimestampToTime(ts uint64) time.Time {
+	secs := int64(ts / 1000)
+	msecs := int64(ts % 1000)
+	return time.Unix(secs, msecs*1000000)
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/signatures.go b/vendor/github.com/google/certificate-transparency-go/signatures.go
new file mode 100644
index 00000000000..b009008c6f5
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/signatures.go
@@ -0,0 +1,110 @@
+// Copyright 2015 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ct
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/pem"
+	"fmt"
+	"log"
+
+	"github.com/google/certificate-transparency-go/tls"
+	"github.com/google/certificate-transparency-go/x509"
+)
+
+// AllowVerificationWithNonCompliantKeys may be set to true in order to allow
+// SignatureVerifier to use keys which are technically non-compliant with
+// RFC6962.
+var AllowVerificationWithNonCompliantKeys = false
+
+// PublicKeyFromPEM parses a PEM formatted block and returns the public key contained within and any remaining unread bytes, or an error.
+func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) {
+	p, rest := pem.Decode(b)
+	if p == nil {
+		return nil, [sha256.Size]byte{}, rest, fmt.Errorf("no PEM block found in %s", string(b))
+	}
+	k, err := x509.ParsePKIXPublicKey(p.Bytes)
+	return k, sha256.Sum256(p.Bytes), rest, err
+}
+
+// PublicKeyFromB64 parses a base64-encoded public key.
+func PublicKeyFromB64(b64PubKey string) (crypto.PublicKey, error) {
+	der, err := base64.StdEncoding.DecodeString(b64PubKey)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding public key: %s", err)
+	}
+	return x509.ParsePKIXPublicKey(der)
+}
+
+// SignatureVerifier can verify signatures on SCTs and STHs
+type SignatureVerifier struct {
+	PubKey crypto.PublicKey
+}
+
+// NewSignatureVerifier creates a new SignatureVerifier using the passed in PublicKey.
+func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
+	switch pkType := pk.(type) {
+	case *rsa.PublicKey:
+		if pkType.N.BitLen() < 2048 {
+			e := fmt.Errorf("public key is RSA with < 2048 bits (size:%d)", pkType.N.BitLen())
+			if !AllowVerificationWithNonCompliantKeys {
+				return nil, e
+			}
+			log.Printf("WARNING: %v", e)
+		}
+	case *ecdsa.PublicKey:
+		params := *(pkType.Params())
+		if params != *elliptic.P256().Params() {
+			e := fmt.Errorf("public key is ECDSA, but not on the P256 curve")
+			if !AllowVerificationWithNonCompliantKeys {
+				return nil, e
+			}
+			log.Printf("WARNING: %v", e)
+		}
+	default:
+		return nil, fmt.Errorf("unsupported public key type %v", pkType)
+	}
+
+	return &SignatureVerifier{PubKey: pk}, nil
+}
+
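NewSignatureVerifier pairs naturally with the STH and SCT helpers defined just below. A minimal sketch of checking a signed tree head against a log's published key (the base64 key string is an assumed input, e.g. taken from a CT log list):

package example

import ct "github.com/google/certificate-transparency-go"

// verifySTH checks sth against a log's public key, supplied as the base64
// encoding of its DER form (placeholder input from the caller).
func verifySTH(b64Key string, sth ct.SignedTreeHead) error {
	pk, err := ct.PublicKeyFromB64(b64Key)
	if err != nil {
		return err
	}
	v, err := ct.NewSignatureVerifier(pk)
	if err != nil {
		return err
	}
	// Re-serializes the TreeHeadSignature input and verifies the signature.
	return v.VerifySTHSignature(sth)
}

+// VerifySignature verifies the given signature sig matches the data.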
+func (s SignatureVerifier) VerifySignature(data []byte, sig tls.DigitallySigned) error { + return tls.VerifySignature(s.PubKey, data, sig) +} + +// VerifySCTSignature verifies that the SCT's signature is valid for the given LogEntry. +func (s SignatureVerifier) VerifySCTSignature(sct SignedCertificateTimestamp, entry LogEntry) error { + sctData, err := SerializeSCTSignatureInput(sct, entry) + if err != nil { + return err + } + return s.VerifySignature(sctData, tls.DigitallySigned(sct.Signature)) +} + +// VerifySTHSignature verifies that the STH's signature is valid. +func (s SignatureVerifier) VerifySTHSignature(sth SignedTreeHead) error { + sthData, err := SerializeSTHSignatureInput(sth) + if err != nil { + return err + } + return s.VerifySignature(sthData, tls.DigitallySigned(sth.TreeHeadSignature)) +} diff --git a/vendor/github.com/google/certificate-transparency-go/tls/signature.go b/vendor/github.com/google/certificate-transparency-go/tls/signature.go new file mode 100644 index 00000000000..bc174df2124 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/tls/signature.go @@ -0,0 +1,152 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tls + +import ( + "crypto" + "crypto/dsa" //nolint:staticcheck + "crypto/ecdsa" + _ "crypto/md5" // For registration side-effect + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" // For registration side-effect + _ "crypto/sha256" // For registration side-effect + _ "crypto/sha512" // For registration side-effect + "errors" + "fmt" + "log" + "math/big" + + "github.com/google/certificate-transparency-go/asn1" +) + +type dsaSig struct { + R, S *big.Int +} + +func generateHash(algo HashAlgorithm, data []byte) ([]byte, crypto.Hash, error) { + var hashType crypto.Hash + switch algo { + case MD5: + hashType = crypto.MD5 + case SHA1: + hashType = crypto.SHA1 + case SHA224: + hashType = crypto.SHA224 + case SHA256: + hashType = crypto.SHA256 + case SHA384: + hashType = crypto.SHA384 + case SHA512: + hashType = crypto.SHA512 + default: + return nil, hashType, fmt.Errorf("unsupported Algorithm.Hash in signature: %v", algo) + } + + hasher := hashType.New() + if _, err := hasher.Write(data); err != nil { + return nil, hashType, fmt.Errorf("failed to write to hasher: %v", err) + } + return hasher.Sum([]byte{}), hashType, nil +} + +// VerifySignature verifies that the passed in signature over data was created by the given PublicKey. 
+func VerifySignature(pubKey crypto.PublicKey, data []byte, sig DigitallySigned) error {
+	hash, hashType, err := generateHash(sig.Algorithm.Hash, data)
+	if err != nil {
+		return err
+	}
+
+	switch sig.Algorithm.Signature {
+	case RSA:
+		rsaKey, ok := pubKey.(*rsa.PublicKey)
+		if !ok {
+			return fmt.Errorf("cannot verify RSA signature with %T key", pubKey)
+		}
+		if err := rsa.VerifyPKCS1v15(rsaKey, hashType, hash, sig.Signature); err != nil {
+			return fmt.Errorf("failed to verify rsa signature: %v", err)
+		}
+	case DSA:
+		dsaKey, ok := pubKey.(*dsa.PublicKey)
+		if !ok {
+			return fmt.Errorf("cannot verify DSA signature with %T key", pubKey)
+		}
+		var dsaSig dsaSig
+		rest, err := asn1.Unmarshal(sig.Signature, &dsaSig)
+		if err != nil {
+			return fmt.Errorf("failed to unmarshal DSA signature: %v", err)
+		}
+		if len(rest) != 0 {
+			log.Printf("Garbage following signature %q", rest)
+		}
+		if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 {
+			return errors.New("DSA signature contained zero or negative values")
+		}
+		if !dsa.Verify(dsaKey, hash, dsaSig.R, dsaSig.S) {
+			return errors.New("failed to verify DSA signature")
+		}
+	case ECDSA:
+		ecdsaKey, ok := pubKey.(*ecdsa.PublicKey)
+		if !ok {
+			return fmt.Errorf("cannot verify ECDSA signature with %T key", pubKey)
+		}
+		var ecdsaSig dsaSig
+		rest, err := asn1.Unmarshal(sig.Signature, &ecdsaSig)
+		if err != nil {
+			return fmt.Errorf("failed to unmarshal ECDSA signature: %v", err)
+		}
+		if len(rest) != 0 {
+			log.Printf("Garbage following signature %q", rest)
+		}
+		if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
+			return errors.New("ECDSA signature contained zero or negative values")
+		}
+
+		if !ecdsa.Verify(ecdsaKey, hash, ecdsaSig.R, ecdsaSig.S) {
+			return errors.New("failed to verify ECDSA signature")
+		}
+	default:
+		return fmt.Errorf("unsupported Algorithm.Signature in signature: %v", sig.Algorithm.Signature)
+	}
+	return nil
+}
+
+// CreateSignature builds a signature over the given data using the specified hash algorithm and private key.
+func CreateSignature(privKey crypto.PrivateKey, hashAlgo HashAlgorithm, data []byte) (DigitallySigned, error) {
+	var sig DigitallySigned
+	sig.Algorithm.Hash = hashAlgo
+	hash, hashType, err := generateHash(sig.Algorithm.Hash, data)
+	if err != nil {
+		return sig, err
+	}
+
+	switch privKey := privKey.(type) {
+	case rsa.PrivateKey:
+		sig.Algorithm.Signature = RSA
+		sig.Signature, err = rsa.SignPKCS1v15(rand.Reader, &privKey, hashType, hash)
+		return sig, err
+	case ecdsa.PrivateKey:
+		sig.Algorithm.Signature = ECDSA
+		var ecdsaSig dsaSig
+		ecdsaSig.R, ecdsaSig.S, err = ecdsa.Sign(rand.Reader, &privKey, hash)
+		if err != nil {
+			return sig, err
+		}
+		sig.Signature, err = asn1.Marshal(ecdsaSig)
+		return sig, err
+	default:
+		return sig, fmt.Errorf("unsupported private key type %T", privKey)
+	}
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/tls/tls.go b/vendor/github.com/google/certificate-transparency-go/tls/tls.go
new file mode 100644
index 00000000000..a48c998f48d
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/tls/tls.go
@@ -0,0 +1,711 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package tls implements functionality for dealing with TLS-encoded data,
+// as defined in RFC 5246. This includes parsing and generation of TLS-encoded
+// data, together with utility functions for dealing with the DigitallySigned
+// TLS type.
+package tls
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// This file holds utility functions for TLS encoding/decoding data
+// as per RFC 5246 section 4.
+
+// A structuralError suggests that the TLS data is valid, but the Go type
+// which is receiving it doesn't match.
+type structuralError struct {
+	field string
+	msg   string
+}
+
+func (e structuralError) Error() string {
+	var prefix string
+	if e.field != "" {
+		prefix = e.field + ": "
+	}
+	return "tls: structure error: " + prefix + e.msg
+}
+
+// A syntaxError suggests that the TLS data is invalid.
+type syntaxError struct {
+	field string
+	msg   string
+}
+
+func (e syntaxError) Error() string {
+	var prefix string
+	if e.field != "" {
+		prefix = e.field + ": "
+	}
+	return "tls: syntax error: " + prefix + e.msg
+}
+
+// Uint24 is an unsigned 3-byte integer.
+type Uint24 uint32
+
+// Enum is an unsigned integer.
+type Enum uint64
+
+var (
+	uint8Type  = reflect.TypeOf(uint8(0))
+	uint16Type = reflect.TypeOf(uint16(0))
+	uint24Type = reflect.TypeOf(Uint24(0))
+	uint32Type = reflect.TypeOf(uint32(0))
+	uint64Type = reflect.TypeOf(uint64(0))
+	enumType   = reflect.TypeOf(Enum(0))
+)
+
+// Unmarshal parses the TLS-encoded data in b and uses the reflect package to
+// fill in an arbitrary value pointed at by val. Because Unmarshal uses the
+// reflect package, the structs being written to must use exported fields
+// (upper case names).
+//
+// The mappings between TLS types and Go types are as follows; some fields
+// must have tags (to indicate their encoded size).
+//
+//	TLS          Go            Required Tags
+//	opaque       byte / uint8
+//	uint8        byte / uint8
+//	uint16       uint16
+//	uint24       tls.Uint24
+//	uint32       uint32
+//	uint64       uint64
+//	enum         tls.Enum      size:S or maxval:N
+//	Type         []Type        minlen:N,maxlen:M
+//	opaque[N]    [N]byte / [N]uint8
+//	uint8[N]     [N]byte / [N]uint8
+//	struct { }   struct { }
+//	select(T) {
+//	 case e1: Type *T          selector:Field,val:e1
+//	}
+//
+// TLS variants (RFC 5246 s4.6.1) are only supported when the value of the
+// associated enumeration type is available earlier in the same enclosing
+// struct, and each possible variant is marked with a selector tag (to
+// indicate which field selects the variants) and a val tag (to indicate
+// what value of the selector picks this particular field).
+//
+// For example, a TLS structure:
+//
+//	enum { e1(1), e2(2) } EnumType;
+//	struct {
+//	   EnumType sel;
+//	   select(sel) {
+//	   case e1: uint16
+//	   case e2: uint32
+//	   } data;
+//	} VariantItem;
+//
+// would have a corresponding Go type:
+//
+//	type VariantItem struct {
+//	   Sel    tls.Enum `tls:"maxval:2"`
+//	   Data16 *uint16  `tls:"selector:Sel,val:1"`
+//	   Data32 *uint32  `tls:"selector:Sel,val:2"`
+//	}
+//
+// TLS fixed-length vectors of types other than opaque or uint8 are not supported.
+// +// For TLS variable-length vectors that are themselves used in other vectors, +// create a single-field structure to represent the inner type. For example, for: +// +// opaque InnerType<1..65535>; +// struct { +// InnerType inners<1,65535>; +// } Something; +// +// convert to: +// +// type InnerType struct { +// Val []byte `tls:"minlen:1,maxlen:65535"` +// } +// type Something struct { +// Inners []InnerType `tls:"minlen:1,maxlen:65535"` +// } +// +// If the encoded value does not fit in the Go type, Unmarshal returns a parse error. +func Unmarshal(b []byte, val interface{}) ([]byte, error) { + return UnmarshalWithParams(b, val, "") +} + +// UnmarshalWithParams allows field parameters to be specified for the +// top-level element. The form of the params is the same as the field tags. +func UnmarshalWithParams(b []byte, val interface{}, params string) ([]byte, error) { + info, err := fieldTagToFieldInfo(params, "") + if err != nil { + return nil, err + } + // The passed in interface{} is a pointer (to allow the value to be written + // to); extract the pointed-to object as a reflect.Value, so parseField + // can do various introspection things. + v := reflect.ValueOf(val).Elem() + offset, err := parseField(v, b, 0, info) + if err != nil { + return nil, err + } + return b[offset:], nil +} + +// Return the number of bytes needed to encode values up to (and including) x. +func byteCount(x uint64) uint { + switch { + case x < 0x100: + return 1 + case x < 0x10000: + return 2 + case x < 0x1000000: + return 3 + case x < 0x100000000: + return 4 + case x < 0x10000000000: + return 5 + case x < 0x1000000000000: + return 6 + case x < 0x100000000000000: + return 7 + default: + return 8 + } +} + +type fieldInfo struct { + count uint // Number of bytes + countSet bool + minlen uint64 // Only relevant for slices + maxlen uint64 // Only relevant for slices + selector string // Only relevant for select sub-values + val uint64 // Only relevant for select sub-values + name string // Used for better error messages +} + +func (i *fieldInfo) fieldName() string { + if i == nil { + return "" + } + return i.name +} + +// Given a tag string, return a fieldInfo describing the field. +func fieldTagToFieldInfo(str string, name string) (*fieldInfo, error) { + var info *fieldInfo + // Iterate over clauses in the tag, ignoring any that don't parse properly. 
+	for _, part := range strings.Split(str, ",") {
+		switch {
+		case strings.HasPrefix(part, "maxval:"):
+			if v, err := strconv.ParseUint(part[7:], 10, 64); err == nil {
+				info = &fieldInfo{count: byteCount(v), countSet: true}
+			}
+		case strings.HasPrefix(part, "size:"):
+			if sz, err := strconv.ParseUint(part[5:], 10, 32); err == nil {
+				info = &fieldInfo{count: uint(sz), countSet: true}
+			}
+		case strings.HasPrefix(part, "maxlen:"):
+			v, err := strconv.ParseUint(part[7:], 10, 64)
+			if err != nil {
+				continue
+			}
+			if info == nil {
+				info = &fieldInfo{}
+			}
+			info.count = byteCount(v)
+			info.countSet = true
+			info.maxlen = v
+		case strings.HasPrefix(part, "minlen:"):
+			v, err := strconv.ParseUint(part[7:], 10, 64)
+			if err != nil {
+				continue
+			}
+			if info == nil {
+				info = &fieldInfo{}
+			}
+			info.minlen = v
+		case strings.HasPrefix(part, "selector:"):
+			if info == nil {
+				info = &fieldInfo{}
+			}
+			info.selector = part[9:]
+		case strings.HasPrefix(part, "val:"):
+			v, err := strconv.ParseUint(part[4:], 10, 64)
+			if err != nil {
+				continue
+			}
+			if info == nil {
+				info = &fieldInfo{}
+			}
+			info.val = v
+		}
+	}
+	if info != nil {
+		info.name = name
+		if info.selector == "" {
+			if info.count < 1 {
+				return nil, structuralError{name, "field of unknown size in " + str}
+			} else if info.count > 8 {
+				return nil, structuralError{name, "specified size too large in " + str}
+			} else if info.minlen > info.maxlen {
+				return nil, structuralError{name, "specified length range inverted in " + str}
+			} else if info.val > 0 {
+				return nil, structuralError{name, "specified selector value but not field in " + str}
+			}
+		}
+	} else if name != "" {
+		info = &fieldInfo{name: name}
+	}
+	return info, nil
+}
+
+// Check that a value fits into a field described by a fieldInfo structure.
+func (i fieldInfo) check(val uint64, fldName string) error {
+	if val >= (1 << (8 * i.count)) {
+		return structuralError{fldName, fmt.Sprintf("value %d too large for size", val)}
+	}
+	if i.maxlen != 0 {
+		if val < i.minlen {
+			return structuralError{fldName, fmt.Sprintf("value %d too small for minimum %d", val, i.minlen)}
+		}
+		if val > i.maxlen {
+			return structuralError{fldName, fmt.Sprintf("value %d too large for maximum %d", val, i.maxlen)}
+		}
+	}
+	return nil
+}
+
+// readVarUint reads a big-endian unsigned integer of the given size in
+// bytes.
+func readVarUint(data []byte, info *fieldInfo) (uint64, error) {
+	if info == nil || !info.countSet {
+		return 0, structuralError{info.fieldName(), "no field size information available"}
+	}
+	if len(data) < int(info.count) {
+		return 0, syntaxError{info.fieldName(), "truncated variable-length integer"}
+	}
+	var result uint64
+	for i := uint(0); i < info.count; i++ {
+		result = (result << 8) | uint64(data[i])
+	}
+	if err := info.check(result, info.name); err != nil {
+		return 0, err
+	}
+	return result, nil
+}
+
+// parseField is the main parsing function. Given a byte slice and an offset
+// (in bytes) into the data, it will try to parse a suitable TLS value out
+// and store it in the given Value.
+func parseField(v reflect.Value, data []byte, initOffset int, info *fieldInfo) (int, error) {
+	offset := initOffset
+	rest := data[offset:]
+
+	fieldType := v.Type()
+	// First look for known fixed types.
+	switch fieldType {
+	case uint8Type:
+		if len(rest) < 1 {
+			return offset, syntaxError{info.fieldName(), "truncated uint8"}
+		}
+		v.SetUint(uint64(rest[0]))
+		offset++
+		return offset, nil
+	case uint16Type:
+		if len(rest) < 2 {
+			return offset, syntaxError{info.fieldName(), "truncated uint16"}
+		}
+		v.SetUint(uint64(binary.BigEndian.Uint16(rest)))
+		offset += 2
+		return offset, nil
+	case uint24Type:
+		if len(rest) < 3 {
+			return offset, syntaxError{info.fieldName(), "truncated uint24"}
+		}
+		v.SetUint(uint64(rest[0])<<16 | uint64(rest[1])<<8 | uint64(rest[2]))
+		offset += 3
+		return offset, nil
+	case uint32Type:
+		if len(rest) < 4 {
+			return offset, syntaxError{info.fieldName(), "truncated uint32"}
+		}
+		v.SetUint(uint64(binary.BigEndian.Uint32(rest)))
+		offset += 4
+		return offset, nil
+	case uint64Type:
+		if len(rest) < 8 {
+			return offset, syntaxError{info.fieldName(), "truncated uint64"}
+		}
+		v.SetUint(uint64(binary.BigEndian.Uint64(rest)))
+		offset += 8
+		return offset, nil
+	}
+
+	// Now deal with user-defined types.
+	switch v.Kind() {
+	case enumType.Kind():
+		// Assume that anything of the same kind as Enum is an Enum, so that
+		// users can alias types of their own to Enum.
+		val, err := readVarUint(rest, info)
+		if err != nil {
+			return offset, err
+		}
+		v.SetUint(val)
+		offset += int(info.count)
+		return offset, nil
+	case reflect.Struct:
+		structType := fieldType
+		// TLS includes a select(Enum) {..} construct, where the value of an enum
+		// indicates which variant field is present (like a C union). We require
+		// that the enum value be an earlier field in the same structure (the selector),
+		// and that each of the possible variant destination fields be pointers.
+		// So the Go mapping looks like:
+		//   type variantType struct {
+		//     Which tls.Enum `tls:"size:1"`               // this is the selector
+		//     Val1  *type1   `tls:"selector:Which,val:1"` // this is a destination
+		//     Val2  *type2   `tls:"selector:Which,val:2"` // this is a destination
+		//   }
+
+		// To deal with this, we track any enum-like fields and their values...
+		enums := make(map[string]uint64)
+		// .. and we track which selector names we've seen (in the destination field tags),
+		// and whether a destination for that selector has been chosen.
+		selectorSeen := make(map[string]bool)
+		for i := 0; i < structType.NumField(); i++ {
+			// Find information about this field.
+			tag := structType.Field(i).Tag.Get("tls")
+			fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name)
+			if err != nil {
+				return offset, err
+			}
+
+			destination := v.Field(i)
+			if fieldInfo.selector != "" {
+				// This is a possible select(Enum) destination, so first check that the referenced
+				// selector field has already been seen earlier in the struct.
+				choice, ok := enums[fieldInfo.selector]
+				if !ok {
+					return offset, structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector}
+				}
+				if structType.Field(i).Type.Kind() != reflect.Ptr {
+					return offset, structuralError{fieldInfo.name, "choice field not a pointer type"}
+				}
+				// Is this the first mention of the selector field name? If so, remember it.
+				seen, ok := selectorSeen[fieldInfo.selector]
+				if !ok {
+					selectorSeen[fieldInfo.selector] = false
+				}
+				if choice != fieldInfo.val {
+					// This destination field was not the chosen one, so make it nil (we checked
+					// it was a pointer above).
+ v.Field(i).Set(reflect.Zero(structType.Field(i).Type)) + continue + } + if seen { + // We already saw a different destination field receive the value for this + // selector value, which indicates a badly annotated structure. + return offset, structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector} + } + selectorSeen[fieldInfo.selector] = true + // Make an object of the pointed-to type and parse into that. + v.Field(i).Set(reflect.New(structType.Field(i).Type.Elem())) + destination = v.Field(i).Elem() + } + offset, err = parseField(destination, data, offset, fieldInfo) + if err != nil { + return offset, err + } + + // Remember any possible tls.Enum values encountered in case they are selectors. + if structType.Field(i).Type.Kind() == enumType.Kind() { + enums[structType.Field(i).Name] = v.Field(i).Uint() + } + + } + + // Now we have seen all fields in the structure, check that all select(Enum) {..} selector + // fields found a destination to put their data in. + for selector, seen := range selectorSeen { + if !seen { + return offset, syntaxError{info.fieldName(), selector + ": unhandled value for selector"} + } + } + return offset, nil + case reflect.Array: + datalen := v.Len() + + if datalen > len(rest) { + return offset, syntaxError{info.fieldName(), "truncated array"} + } + inner := rest[:datalen] + offset += datalen + if fieldType.Elem().Kind() != reflect.Uint8 { + // Only byte/uint8 arrays are supported + return offset, structuralError{info.fieldName(), "unsupported array type: " + v.Type().String()} + } + reflect.Copy(v, reflect.ValueOf(inner)) + return offset, nil + + case reflect.Slice: + sliceType := fieldType + // Slices represent variable-length vectors, which are prefixed by a length field. + // The fieldInfo indicates the size of that length field. + varlen, err := readVarUint(rest, info) + if err != nil { + return offset, err + } + datalen := int(varlen) + offset += int(info.count) + rest = rest[info.count:] + + if datalen > len(rest) { + return offset, syntaxError{info.fieldName(), "truncated slice"} + } + inner := rest[:datalen] + offset += datalen + if fieldType.Elem().Kind() == reflect.Uint8 { + // Fast version for []byte + v.Set(reflect.MakeSlice(sliceType, datalen, datalen)) + reflect.Copy(v, reflect.ValueOf(inner)) + return offset, nil + } + + v.Set(reflect.MakeSlice(sliceType, 0, datalen)) + single := reflect.New(sliceType.Elem()) + for innerOffset := 0; innerOffset < len(inner); { + var err error + innerOffset, err = parseField(single.Elem(), inner, innerOffset, nil) + if err != nil { + return offset, err + } + v.Set(reflect.Append(v, single.Elem())) + } + return offset, nil + + default: + return offset, structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())} + } +} + +// Marshal returns the TLS encoding of val. +func Marshal(val interface{}) ([]byte, error) { + return MarshalWithParams(val, "") +} + +// MarshalWithParams returns the TLS encoding of val, and allows field +// parameters to be specified for the top-level element. The form +// of the params is the same as the field tags. 
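A round trip through Marshal and Unmarshal makes the tag-driven encoding concrete. A small sketch using the variant struct from the Unmarshal documentation above (the values are illustrative):

package main

import (
	"fmt"

	"github.com/google/certificate-transparency-go/tls"
)

type VariantItem struct {
	Sel    tls.Enum `tls:"maxval:2"`
	Data16 *uint16  `tls:"selector:Sel,val:1"`
	Data32 *uint32  `tls:"selector:Sel,val:2"`
}

func main() {
	val := uint16(0xabcd)
	// Sel fits in one byte (maxval:2), then the chosen uint16 follows.
	b, err := tls.Marshal(VariantItem{Sel: 1, Data16: &val})
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // 01 ab cd

	var out VariantItem
	if _, err := tls.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(*out.Data16 == val, out.Data32 == nil) // true true
}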
+func MarshalWithParams(val interface{}, params string) ([]byte, error) { + info, err := fieldTagToFieldInfo(params, "") + if err != nil { + return nil, err + } + var out bytes.Buffer + v := reflect.ValueOf(val) + if err := marshalField(&out, v, info); err != nil { + return nil, err + } + return out.Bytes(), err +} + +func marshalField(out *bytes.Buffer, v reflect.Value, info *fieldInfo) error { + var prefix string + if info != nil && len(info.name) > 0 { + prefix = info.name + ": " + } + fieldType := v.Type() + // First look for known fixed types. + switch fieldType { + case uint8Type: + out.WriteByte(byte(v.Uint())) + return nil + case uint16Type: + scratch := make([]byte, 2) + binary.BigEndian.PutUint16(scratch, uint16(v.Uint())) + out.Write(scratch) + return nil + case uint24Type: + i := v.Uint() + if i > 0xffffff { + return structuralError{info.fieldName(), fmt.Sprintf("uint24 overflow %d", i)} + } + scratch := make([]byte, 4) + binary.BigEndian.PutUint32(scratch, uint32(i)) + out.Write(scratch[1:]) + return nil + case uint32Type: + scratch := make([]byte, 4) + binary.BigEndian.PutUint32(scratch, uint32(v.Uint())) + out.Write(scratch) + return nil + case uint64Type: + scratch := make([]byte, 8) + binary.BigEndian.PutUint64(scratch, uint64(v.Uint())) + out.Write(scratch) + return nil + } + + // Now deal with user-defined types. + switch v.Kind() { + case enumType.Kind(): + i := v.Uint() + if info == nil { + return structuralError{info.fieldName(), "enum field tag missing"} + } + if err := info.check(i, prefix); err != nil { + return err + } + scratch := make([]byte, 8) + binary.BigEndian.PutUint64(scratch, uint64(i)) + out.Write(scratch[(8 - info.count):]) + return nil + case reflect.Struct: + structType := fieldType + enums := make(map[string]uint64) // Values of any Enum fields + // The comment parseField() describes the mapping of the TLS select(Enum) {..} construct; + // here we have selector and source (rather than destination) fields. + + // Track which selector names we've seen (in the source field tags), and whether a source + // value for that selector has been processed. + selectorSeen := make(map[string]bool) + for i := 0; i < structType.NumField(); i++ { + // Find information about this field. + tag := structType.Field(i).Tag.Get("tls") + fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name) + if err != nil { + return err + } + + source := v.Field(i) + if fieldInfo.selector != "" { + // This field is a possible source for a select(Enum) {..}. First check + // the selector field name has been seen. + choice, ok := enums[fieldInfo.selector] + if !ok { + return structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector} + } + if structType.Field(i).Type.Kind() != reflect.Ptr { + return structuralError{fieldInfo.name, "choice field not a pointer type"} + } + // Is this the first mention of the selector field name? If so, remember it. + seen, ok := selectorSeen[fieldInfo.selector] + if !ok { + selectorSeen[fieldInfo.selector] = false + } + if choice != fieldInfo.val { + // This source was not chosen; police that it should be nil. + if v.Field(i).Pointer() != uintptr(0) { + return structuralError{fieldInfo.name, "unchosen field is non-nil"} + } + continue + } + if seen { + // We already saw a different source field generate the value for this + // selector value, which indicates a badly annotated structure. 
+ return structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector} + } + selectorSeen[fieldInfo.selector] = true + if v.Field(i).Pointer() == uintptr(0) { + return structuralError{fieldInfo.name, "chosen field is nil"} + } + // Marshal from the pointed-to source object. + source = v.Field(i).Elem() + } + + var fieldData bytes.Buffer + if err := marshalField(&fieldData, source, fieldInfo); err != nil { + return err + } + out.Write(fieldData.Bytes()) + + // Remember any tls.Enum values encountered in case they are selectors. + if structType.Field(i).Type.Kind() == enumType.Kind() { + enums[structType.Field(i).Name] = v.Field(i).Uint() + } + } + // Now we have seen all fields in the structure, check that all select(Enum) {..} selector + // fields found a source field to get their data from. + for selector, seen := range selectorSeen { + if !seen { + return syntaxError{info.fieldName(), selector + ": unhandled value for selector"} + } + } + return nil + + case reflect.Array: + datalen := v.Len() + arrayType := fieldType + if arrayType.Elem().Kind() != reflect.Uint8 { + // Only byte/uint8 arrays are supported + return structuralError{info.fieldName(), "unsupported array type"} + } + bytes := make([]byte, datalen) + for i := 0; i < datalen; i++ { + bytes[i] = uint8(v.Index(i).Uint()) + } + _, err := out.Write(bytes) + return err + + case reflect.Slice: + if info == nil { + return structuralError{info.fieldName(), "slice field tag missing"} + } + + sliceType := fieldType + if sliceType.Elem().Kind() == reflect.Uint8 { + // Fast version for []byte: first write the length as info.count bytes. + datalen := v.Len() + scratch := make([]byte, 8) + binary.BigEndian.PutUint64(scratch, uint64(datalen)) + out.Write(scratch[(8 - info.count):]) + + if err := info.check(uint64(datalen), prefix); err != nil { + return err + } + // Then just write the data. + bytes := make([]byte, datalen) + for i := 0; i < datalen; i++ { + bytes[i] = uint8(v.Index(i).Uint()) + } + _, err := out.Write(bytes) + return err + } + // General version: use a separate Buffer to write the slice entries into. + var innerBuf bytes.Buffer + for i := 0; i < v.Len(); i++ { + if err := marshalField(&innerBuf, v.Index(i), nil); err != nil { + return err + } + } + + // Now insert (and check) the size. + size := uint64(innerBuf.Len()) + if err := info.check(size, prefix); err != nil { + return err + } + scratch := make([]byte, 8) + binary.BigEndian.PutUint64(scratch, size) + out.Write(scratch[(8 - info.count):]) + + // Then copy the data. + _, err := out.Write(innerBuf.Bytes()) + return err + + default: + return structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())} + } +} diff --git a/vendor/github.com/google/certificate-transparency-go/tls/types.go b/vendor/github.com/google/certificate-transparency-go/tls/types.go new file mode 100644 index 00000000000..b8eaf24bdd5 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/tls/types.go @@ -0,0 +1,117 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tls + +import ( + "crypto" + "crypto/dsa" //nolint:staticcheck + "crypto/ecdsa" + "crypto/rsa" + "fmt" +) + +// DigitallySigned gives information about a signature, including the algorithm used +// and the signature value. Defined in RFC 5246 s4.7. +type DigitallySigned struct { + Algorithm SignatureAndHashAlgorithm + Signature []byte `tls:"minlen:0,maxlen:65535"` +} + +func (d DigitallySigned) String() string { + return fmt.Sprintf("Signature: HashAlgo=%v SignAlgo=%v Value=%x", d.Algorithm.Hash, d.Algorithm.Signature, d.Signature) +} + +// SignatureAndHashAlgorithm gives information about the algorithms used for a +// signature. Defined in RFC 5246 s7.4.1.4.1. +type SignatureAndHashAlgorithm struct { + Hash HashAlgorithm `tls:"maxval:255"` + Signature SignatureAlgorithm `tls:"maxval:255"` +} + +// HashAlgorithm enum from RFC 5246 s7.4.1.4.1. +type HashAlgorithm Enum + +// HashAlgorithm constants from RFC 5246 s7.4.1.4.1. +const ( + None HashAlgorithm = 0 + MD5 HashAlgorithm = 1 + SHA1 HashAlgorithm = 2 + SHA224 HashAlgorithm = 3 + SHA256 HashAlgorithm = 4 + SHA384 HashAlgorithm = 5 + SHA512 HashAlgorithm = 6 +) + +func (h HashAlgorithm) String() string { + switch h { + case None: + return "None" + case MD5: + return "MD5" + case SHA1: + return "SHA1" + case SHA224: + return "SHA224" + case SHA256: + return "SHA256" + case SHA384: + return "SHA384" + case SHA512: + return "SHA512" + default: + return fmt.Sprintf("UNKNOWN(%d)", h) + } +} + +// SignatureAlgorithm enum from RFC 5246 s7.4.1.4.1. +type SignatureAlgorithm Enum + +// SignatureAlgorithm constants from RFC 5246 s7.4.1.4.1. +const ( + Anonymous SignatureAlgorithm = 0 + RSA SignatureAlgorithm = 1 + DSA SignatureAlgorithm = 2 + ECDSA SignatureAlgorithm = 3 +) + +func (s SignatureAlgorithm) String() string { + switch s { + case Anonymous: + return "Anonymous" + case RSA: + return "RSA" + case DSA: + return "DSA" + case ECDSA: + return "ECDSA" + default: + return fmt.Sprintf("UNKNOWN(%d)", s) + } +} + +// SignatureAlgorithmFromPubKey returns the algorithm used for this public key. +// ECDSA, RSA, and DSA keys are supported. Other key types will return Anonymous. +func SignatureAlgorithmFromPubKey(k crypto.PublicKey) SignatureAlgorithm { + switch k.(type) { + case *ecdsa.PublicKey: + return ECDSA + case *rsa.PublicKey: + return RSA + case *dsa.PublicKey: + return DSA + default: + return Anonymous + } +} diff --git a/vendor/github.com/google/certificate-transparency-go/types.go b/vendor/github.com/google/certificate-transparency-go/types.go new file mode 100644 index 00000000000..2a96f6a09f5 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/types.go @@ -0,0 +1,591 @@ +// Copyright 2015 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ct holds core types and utilities for Certificate Transparency. +package ct + +import ( + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + + "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/x509" +) + +/////////////////////////////////////////////////////////////////////////////// +// The following structures represent those outlined in RFC6962; any section +// numbers mentioned refer to that RFC. +/////////////////////////////////////////////////////////////////////////////// + +// LogEntryType represents the LogEntryType enum from section 3.1: +// +// enum { x509_entry(0), precert_entry(1), (65535) } LogEntryType; +type LogEntryType tls.Enum // tls:"maxval:65535" + +// LogEntryType constants from section 3.1. +const ( + X509LogEntryType LogEntryType = 0 + PrecertLogEntryType LogEntryType = 1 +) + +func (e LogEntryType) String() string { + switch e { + case X509LogEntryType: + return "X509LogEntryType" + case PrecertLogEntryType: + return "PrecertLogEntryType" + default: + return fmt.Sprintf("UnknownEntryType(%d)", e) + } +} + +// RFC6962 section 2.1 requires a prefix byte on hash inputs for second preimage resistance. +const ( + TreeLeafPrefix = byte(0x00) + TreeNodePrefix = byte(0x01) +) + +// MerkleLeafType represents the MerkleLeafType enum from section 3.4: +// +// enum { timestamped_entry(0), (255) } MerkleLeafType; +type MerkleLeafType tls.Enum // tls:"maxval:255" + +// TimestampedEntryLeafType is the only defined MerkleLeafType constant from section 3.4. +const TimestampedEntryLeafType MerkleLeafType = 0 // Entry type for an SCT + +func (m MerkleLeafType) String() string { + switch m { + case TimestampedEntryLeafType: + return "TimestampedEntryLeafType" + default: + return fmt.Sprintf("UnknownLeafType(%d)", m) + } +} + +// Version represents the Version enum from section 3.2: +// +// enum { v1(0), (255) } Version; +type Version tls.Enum // tls:"maxval:255" + +// CT Version constants from section 3.2. +const ( + V1 Version = 0 +) + +func (v Version) String() string { + switch v { + case V1: + return "V1" + default: + return fmt.Sprintf("UnknownVersion(%d)", v) + } +} + +// SignatureType differentiates STH signatures from SCT signatures, see section 3.2. +// +// enum { certificate_timestamp(0), tree_hash(1), (255) } SignatureType; +type SignatureType tls.Enum // tls:"maxval:255" + +// SignatureType constants from section 3.2. +const ( + CertificateTimestampSignatureType SignatureType = 0 + TreeHashSignatureType SignatureType = 1 +) + +func (st SignatureType) String() string { + switch st { + case CertificateTimestampSignatureType: + return "CertificateTimestamp" + case TreeHashSignatureType: + return "TreeHash" + default: + return fmt.Sprintf("UnknownSignatureType(%d)", st) + } +} + +// ASN1Cert type for holding the raw DER bytes of an ASN.1 Certificate +// (section 3.1). +type ASN1Cert struct { + Data []byte `tls:"minlen:1,maxlen:16777215"` +} + +// LogID holds the hash of the Log's public key (section 3.2). +// TODO(pphaneuf): Users should be migrated to the one in the logid package. 
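TreeLeafPrefix and TreeNodePrefix above implement the domain separation from RFC 6962 s2.1: because every leaf hash starts with 0x00 and every interior-node hash with 0x01, a leaf input can never be confused with a node input (the second-preimage defence). A sketch of both computations, with the inputs assumed to come from the caller:

package example

import (
	"crypto/sha256"

	ct "github.com/google/certificate-transparency-go"
)

// leafHash computes the Merkle tree hash of a single leaf:
// SHA-256(0x00 || leafInput).
func leafHash(leafInput []byte) [sha256.Size]byte {
	return sha256.Sum256(append([]byte{ct.TreeLeafPrefix}, leafInput...))
}

// nodeHash combines two subtree hashes:
// SHA-256(0x01 || left || right).
func nodeHash(left, right []byte) [sha256.Size]byte {
	data := append([]byte{ct.TreeNodePrefix}, left...)
	data = append(data, right...)
	return sha256.Sum256(data)
}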
+type LogID struct { + KeyID [sha256.Size]byte +} + +// PreCert represents a Precertificate (section 3.2). +type PreCert struct { + IssuerKeyHash [sha256.Size]byte + TBSCertificate []byte `tls:"minlen:1,maxlen:16777215"` // DER-encoded TBSCertificate +} + +// CTExtensions is a representation of the raw bytes of any CtExtension +// structure (see section 3.2). +// nolint: revive +type CTExtensions []byte // tls:"minlen:0,maxlen:65535"` + +// MerkleTreeNode represents an internal node in the CT tree. +type MerkleTreeNode []byte + +// ConsistencyProof represents a CT consistency proof (see sections 2.1.2 and +// 4.4). +type ConsistencyProof []MerkleTreeNode + +// AuditPath represents a CT inclusion proof (see sections 2.1.1 and 4.5). +type AuditPath []MerkleTreeNode + +// LeafInput represents a serialized MerkleTreeLeaf structure. +type LeafInput []byte + +// DigitallySigned is a local alias for tls.DigitallySigned so that we can +// attach a MarshalJSON method. +type DigitallySigned tls.DigitallySigned + +// FromBase64String populates the DigitallySigned structure from the base64 data passed in. +// Returns an error if the base64 data is invalid. +func (d *DigitallySigned) FromBase64String(b64 string) error { + raw, err := base64.StdEncoding.DecodeString(b64) + if err != nil { + return fmt.Errorf("failed to unbase64 DigitallySigned: %v", err) + } + var ds tls.DigitallySigned + if rest, err := tls.Unmarshal(raw, &ds); err != nil { + return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err) + } else if len(rest) > 0 { + return fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)) + } + *d = DigitallySigned(ds) + return nil +} + +// Base64String returns the base64 representation of the DigitallySigned struct. +func (d DigitallySigned) Base64String() (string, error) { + b, err := tls.Marshal(d) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(b), nil +} + +// MarshalJSON implements the json.Marshaller interface. +func (d DigitallySigned) MarshalJSON() ([]byte, error) { + b64, err := d.Base64String() + if err != nil { + return []byte{}, err + } + return []byte(`"` + b64 + `"`), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (d *DigitallySigned) UnmarshalJSON(b []byte) error { + var content string + if err := json.Unmarshal(b, &content); err != nil { + return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err) + } + return d.FromBase64String(content) +} + +// RawLogEntry represents the (TLS-parsed) contents of an entry in a CT log. +type RawLogEntry struct { + // Index is a position of the entry in the log. + Index int64 + // Leaf is a parsed Merkle leaf hash input. + Leaf MerkleTreeLeaf + // Cert is: + // - A certificate if Leaf.TimestampedEntry.EntryType is X509LogEntryType. + // - A precertificate if Leaf.TimestampedEntry.EntryType is + // PrecertLogEntryType, in the form of a DER-encoded Certificate as + // originally added (which includes the poison extension and a signature + // generated over the pre-cert by the pre-cert issuer). + // - Empty otherwise. + Cert ASN1Cert + // Chain is the issuing certificate chain starting with the issuer of Cert, + // or an empty slice if Cert is empty. + Chain []ASN1Cert +} + +// LogEntry represents the (parsed) contents of an entry in a CT log. This is described +// in section 3.1, but note that this structure does *not* match the TLS structure +// defined there (the TLS structure is never used directly in RFC6962). 
+type LogEntry struct {
+	Index int64
+	Leaf  MerkleTreeLeaf
+	// Exactly one of the following three fields should be non-empty.
+	X509Cert *x509.Certificate // Parsed X.509 certificate
+	Precert  *Precertificate   // Extracted precertificate
+	JSONData []byte
+
+	// Chain holds the issuing certificate chain, starting with the
+	// issuer of the leaf certificate / pre-certificate.
+	Chain []ASN1Cert
+}
+
+// PrecertChainEntry holds a precertificate together with a validation chain
+// for it; see section 3.1.
+type PrecertChainEntry struct {
+	PreCertificate   ASN1Cert   `tls:"minlen:1,maxlen:16777215"`
+	CertificateChain []ASN1Cert `tls:"minlen:0,maxlen:16777215"`
+}
+
+// CertificateChain holds a chain of certificates, as returned as extra data
+// for get-entries (section 4.6).
+type CertificateChain struct {
+	Entries []ASN1Cert `tls:"minlen:0,maxlen:16777215"`
+}
+
+// PrecertChainEntryHash is an extended PrecertChainEntry type with the
+// IssuanceChainHash field added to store the hash of the
+// CertificateChain field of PrecertChainEntry.
+type PrecertChainEntryHash struct {
+	PreCertificate    ASN1Cert `tls:"minlen:1,maxlen:16777215"`
+	IssuanceChainHash []byte   `tls:"minlen:0,maxlen:256"`
+}
+
+// CertificateChainHash is an extended CertificateChain type with the
+// IssuanceChainHash field added to store the hash of the
+// Entries field of CertificateChain.
+type CertificateChainHash struct {
+	IssuanceChainHash []byte `tls:"minlen:0,maxlen:256"`
+}
+
+// JSONDataEntry holds arbitrary data.
+type JSONDataEntry struct {
+	Data []byte `tls:"minlen:0,maxlen:16777215"`
+}
+
+// SHA256Hash represents the output from the SHA256 hash function.
+type SHA256Hash [sha256.Size]byte
+
+// FromBase64String populates the SHA256 struct with the contents of the base64 data passed in.
+func (s *SHA256Hash) FromBase64String(b64 string) error {
+	bs, err := base64.StdEncoding.DecodeString(b64)
+	if err != nil {
+		return fmt.Errorf("failed to unbase64 SHA256Hash: %v", err)
+	}
+	if len(bs) != sha256.Size {
+		return fmt.Errorf("invalid SHA256 length, expected 32 but got %d", len(bs))
+	}
+	copy(s[:], bs)
+	return nil
+}
+
+// Base64String returns the base64 representation of this SHA256Hash.
+func (s SHA256Hash) Base64String() string {
+	return base64.StdEncoding.EncodeToString(s[:])
+}
+
+// MarshalJSON implements the json.Marshaller interface for SHA256Hash.
+func (s SHA256Hash) MarshalJSON() ([]byte, error) {
+	return []byte(`"` + s.Base64String() + `"`), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (s *SHA256Hash) UnmarshalJSON(b []byte) error {
+	var content string
+	if err := json.Unmarshal(b, &content); err != nil {
+		return fmt.Errorf("failed to unmarshal SHA256Hash: %v", err)
+	}
+	return s.FromBase64String(content)
+}
+
+// SignedTreeHead represents the structure returned by the get-sth CT method
+// after base64 decoding; see sections 3.5 and 4.3.
+type SignedTreeHead struct { + Version Version `json:"sth_version"` // The version of the protocol to which the STH conforms + TreeSize uint64 `json:"tree_size"` // The number of entries in the new tree + Timestamp uint64 `json:"timestamp"` // The time at which the STH was created + SHA256RootHash SHA256Hash `json:"sha256_root_hash"` // The root hash of the log's Merkle tree + TreeHeadSignature DigitallySigned `json:"tree_head_signature"` // Log's signature over a TLS-encoded TreeHeadSignature + LogID SHA256Hash `json:"log_id"` // The SHA256 hash of the log's public key +} + +func (s SignedTreeHead) String() string { + sigStr, err := s.TreeHeadSignature.Base64String() + if err != nil { + sigStr = tls.DigitallySigned(s.TreeHeadSignature).String() + } + + // If the LogID field in the SignedTreeHead is empty, don't include it in + // the string. + var logIDStr string + if id, empty := s.LogID, (SHA256Hash{}); id != empty { + logIDStr = fmt.Sprintf("LogID:%s, ", id.Base64String()) + } + + return fmt.Sprintf("{%sTreeSize:%d, Timestamp:%d, SHA256RootHash:%q, TreeHeadSignature:%q}", + logIDStr, s.TreeSize, s.Timestamp, s.SHA256RootHash.Base64String(), sigStr) +} + +// TreeHeadSignature holds the data over which the signature in an STH is +// generated; see section 3.5 +type TreeHeadSignature struct { + Version Version `tls:"maxval:255"` + SignatureType SignatureType `tls:"maxval:255"` // == TreeHashSignatureType + Timestamp uint64 + TreeSize uint64 + SHA256RootHash SHA256Hash +} + +// SignedCertificateTimestamp represents the structure returned by the +// add-chain and add-pre-chain methods after base64 decoding; see sections +// 3.2, 4.1 and 4.2. +type SignedCertificateTimestamp struct { + SCTVersion Version `tls:"maxval:255"` + LogID LogID + Timestamp uint64 + Extensions CTExtensions `tls:"minlen:0,maxlen:65535"` + Signature DigitallySigned // Signature over TLS-encoded CertificateTimestamp +} + +// CertificateTimestamp is the collection of data that the signature in an +// SCT is over; see section 3.2. +type CertificateTimestamp struct { + SCTVersion Version `tls:"maxval:255"` + SignatureType SignatureType `tls:"maxval:255"` + Timestamp uint64 + EntryType LogEntryType `tls:"maxval:65535"` + X509Entry *ASN1Cert `tls:"selector:EntryType,val:0"` + PrecertEntry *PreCert `tls:"selector:EntryType,val:1"` + JSONEntry *JSONDataEntry `tls:"selector:EntryType,val:32768"` + Extensions CTExtensions `tls:"minlen:0,maxlen:65535"` +} + +func (s SignedCertificateTimestamp) String() string { + return fmt.Sprintf("{Version:%d LogId:%s Timestamp:%d Extensions:'%s' Signature:%v}", s.SCTVersion, + base64.StdEncoding.EncodeToString(s.LogID.KeyID[:]), + s.Timestamp, + s.Extensions, + s.Signature) +} + +// TimestampedEntry is part of the MerkleTreeLeaf structure; see section 3.4. +type TimestampedEntry struct { + Timestamp uint64 + EntryType LogEntryType `tls:"maxval:65535"` + X509Entry *ASN1Cert `tls:"selector:EntryType,val:0"` + PrecertEntry *PreCert `tls:"selector:EntryType,val:1"` + JSONEntry *JSONDataEntry `tls:"selector:EntryType,val:32768"` + Extensions CTExtensions `tls:"minlen:0,maxlen:65535"` +} + +// MerkleTreeLeaf represents the deserialized structure of the hash input for the +// leaves of a log's Merkle tree; see section 3.4. +type MerkleTreeLeaf struct { + Version Version `tls:"maxval:255"` + LeafType MerkleLeafType `tls:"maxval:255"` + TimestampedEntry *TimestampedEntry `tls:"selector:LeafType,val:0"` +} + +// Precertificate represents the parsed CT Precertificate structure. 
+type Precertificate struct { + // DER-encoded pre-certificate as originally added, which includes a + // poison extension and a signature generated over the pre-cert by + // the pre-cert issuer (which might differ from the issuer of the final + // cert, see RFC6962 s3.1). + Submitted ASN1Cert + // SHA256 hash of the issuing key + IssuerKeyHash [sha256.Size]byte + // Parsed TBSCertificate structure, held in an x509.Certificate for convenience. + TBSCertificate *x509.Certificate +} + +// X509Certificate returns the X.509 Certificate contained within the +// MerkleTreeLeaf. +func (m *MerkleTreeLeaf) X509Certificate() (*x509.Certificate, error) { + if m.TimestampedEntry.EntryType != X509LogEntryType { + return nil, fmt.Errorf("cannot call X509Certificate on a MerkleTreeLeaf that is not an X509 entry") + } + return x509.ParseCertificate(m.TimestampedEntry.X509Entry.Data) +} + +// Precertificate returns the X.509 Precertificate contained within the MerkleTreeLeaf. +// +// The returned precertificate is embedded in an x509.Certificate, but is in the +// form stored internally in the log rather than the original submitted form +// (i.e. it does not include the poison extension and any changes to reflect the +// final certificate's issuer have been made; see x509.BuildPrecertTBS). +func (m *MerkleTreeLeaf) Precertificate() (*x509.Certificate, error) { + if m.TimestampedEntry.EntryType != PrecertLogEntryType { + return nil, fmt.Errorf("cannot call Precertificate on a MerkleTreeLeaf that is not a precert entry") + } + return x509.ParseTBSCertificate(m.TimestampedEntry.PrecertEntry.TBSCertificate) +} + +// APIEndpoint is a string that represents one of the Certificate Transparency +// Log API endpoints. +type APIEndpoint string + +// Certificate Transparency Log API endpoints; see section 4. +// WARNING: Should match the URI paths without the "/ct/v1/" prefix. If +// changing these constants, may need to change those too. +const ( + AddChainStr APIEndpoint = "add-chain" + AddPreChainStr APIEndpoint = "add-pre-chain" + GetSTHStr APIEndpoint = "get-sth" + GetEntriesStr APIEndpoint = "get-entries" + GetProofByHashStr APIEndpoint = "get-proof-by-hash" + GetSTHConsistencyStr APIEndpoint = "get-sth-consistency" + GetRootsStr APIEndpoint = "get-roots" + GetEntryAndProofStr APIEndpoint = "get-entry-and-proof" +) + +// URI paths for Log requests; see section 4. +// WARNING: Should match the API endpoints, with the "/ct/v1/" prefix. If +// changing these constants, may need to change those too. +const ( + AddChainPath = "/ct/v1/add-chain" + AddPreChainPath = "/ct/v1/add-pre-chain" + GetSTHPath = "/ct/v1/get-sth" + GetEntriesPath = "/ct/v1/get-entries" + GetProofByHashPath = "/ct/v1/get-proof-by-hash" + GetSTHConsistencyPath = "/ct/v1/get-sth-consistency" + GetRootsPath = "/ct/v1/get-roots" + GetEntryAndProofPath = "/ct/v1/get-entry-and-proof" + + AddJSONPath = "/ct/v1/add-json" // Experimental addition +) + +// AddChainRequest represents the JSON request body sent to the add-chain and +// add-pre-chain POST methods from sections 4.1 and 4.2. +type AddChainRequest struct { + Chain [][]byte `json:"chain"` +} + +// AddChainResponse represents the JSON response to the add-chain and +// add-pre-chain POST methods. +// An SCT represents a Log's promise to integrate a [pre-]certificate into the +// log within a defined period of time. 
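To make the request/response pairing concrete: a hedged sketch of submitting a chain over HTTP using AddChainRequest above and the AddChainResponse type defined just below. logURL is an assumed input (a log's base URL); production code would normally use a dedicated CT client with retries and timeouts rather than raw net/http.

package example

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"

	ct "github.com/google/certificate-transparency-go"
)

// submitChain POSTs a DER-encoded chain to a log's add-chain endpoint and
// parses the returned SCT.
func submitChain(logURL string, chain [][]byte) (*ct.SignedCertificateTimestamp, error) {
	body, err := json.Marshal(ct.AddChainRequest{Chain: chain})
	if err != nil {
		return nil, err
	}
	resp, err := http.Post(logURL+ct.AddChainPath, "application/json", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("add-chain: unexpected status %d", resp.StatusCode)
	}
	var acr ct.AddChainResponse
	if err := json.NewDecoder(resp.Body).Decode(&acr); err != nil {
		return nil, err
	}
	// Convert the JSON body into a TLS-parsed SCT; this validates the log ID
	// length, the extensions encoding, and the signature structure.
	return acr.ToSignedCertificateTimestamp()
}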
+type AddChainResponse struct { + SCTVersion Version `json:"sct_version"` // SCT structure version + ID []byte `json:"id"` // Log ID + Timestamp uint64 `json:"timestamp"` // Timestamp of issuance + Extensions string `json:"extensions"` // Holder for any CT extensions + Signature []byte `json:"signature"` // Log signature for this SCT +} + +// ToSignedCertificateTimestamp creates a SignedCertificateTimestamp from the +// AddChainResponse. +func (r *AddChainResponse) ToSignedCertificateTimestamp() (*SignedCertificateTimestamp, error) { + sct := SignedCertificateTimestamp{ + SCTVersion: r.SCTVersion, + Timestamp: r.Timestamp, + } + + if len(r.ID) != sha256.Size { + return nil, fmt.Errorf("id is invalid length, expected %d got %d", sha256.Size, len(r.ID)) + } + copy(sct.LogID.KeyID[:], r.ID) + + exts, err := base64.StdEncoding.DecodeString(r.Extensions) + if err != nil { + return nil, fmt.Errorf("invalid base64 data in Extensions (%q): %v", r.Extensions, err) + } + sct.Extensions = CTExtensions(exts) + + var ds DigitallySigned + if rest, err := tls.Unmarshal(r.Signature, &ds); err != nil { + return nil, fmt.Errorf("tls.Unmarshal(): %s", err) + } else if len(rest) > 0 { + return nil, fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)) + } + sct.Signature = ds + + return &sct, nil +} + +// AddJSONRequest represents the JSON request body sent to the add-json POST method. +// The corresponding response re-uses AddChainResponse. +// This is an experimental addition not covered by RFC6962. +type AddJSONRequest struct { + Data interface{} `json:"data"` +} + +// GetSTHResponse represents the JSON response to the get-sth GET method from section 4.3. +type GetSTHResponse struct { + TreeSize uint64 `json:"tree_size"` // Number of certs in the current tree + Timestamp uint64 `json:"timestamp"` // Time that the tree was created + SHA256RootHash []byte `json:"sha256_root_hash"` // Root hash of the tree + TreeHeadSignature []byte `json:"tree_head_signature"` // Log signature for this STH +} + +// ToSignedTreeHead creates a SignedTreeHead from the GetSTHResponse. +func (r *GetSTHResponse) ToSignedTreeHead() (*SignedTreeHead, error) { + sth := SignedTreeHead{ + TreeSize: r.TreeSize, + Timestamp: r.Timestamp, + } + + if len(r.SHA256RootHash) != sha256.Size { + return nil, fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(r.SHA256RootHash)) + } + copy(sth.SHA256RootHash[:], r.SHA256RootHash) + + var ds DigitallySigned + if rest, err := tls.Unmarshal(r.TreeHeadSignature, &ds); err != nil { + return nil, fmt.Errorf("tls.Unmarshal(): %s", err) + } else if len(rest) > 0 { + return nil, fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)) + } + sth.TreeHeadSignature = ds + + return &sth, nil +} + +// GetSTHConsistencyResponse represents the JSON response to the get-sth-consistency +// GET method from section 4.4. (The corresponding GET request has parameters 'first' and +// 'second'.) +type GetSTHConsistencyResponse struct { + Consistency [][]byte `json:"consistency"` +} + +// GetProofByHashResponse represents the JSON response to the get-proof-by-hash GET +// method from section 4.5. (The corresponding GET request has parameters 'hash' +// and 'tree_size'.) +type GetProofByHashResponse struct { + LeafIndex int64 `json:"leaf_index"` // The 0-based index of the end entity corresponding to the "hash" parameter. 
+ AuditPath [][]byte `json:"audit_path"` // An array of base64-encoded Merkle Tree nodes proving the inclusion of the chosen certificate. +} + +// LeafEntry represents a leaf in the Log's Merkle tree, as returned by the get-entries +// GET method from section 4.6. +type LeafEntry struct { + // LeafInput is a TLS-encoded MerkleTreeLeaf + LeafInput []byte `json:"leaf_input"` + // ExtraData holds (unsigned) extra data, normally the cert validation chain. + ExtraData []byte `json:"extra_data"` +} + +// GetEntriesResponse represents the JSON response to the get-entries GET method +// from section 4.6. +type GetEntriesResponse struct { + Entries []LeafEntry `json:"entries"` // the list of returned entries +} + +// GetRootsResponse represents the JSON response to the get-roots GET method from section 4.7. +type GetRootsResponse struct { + Certificates []string `json:"certificates"` +} + +// GetEntryAndProofResponse represents the JSON response to the get-entry-and-proof +// GET method from section 4.8. (The corresponding GET request has parameters 'leaf_index' +// and 'tree_size'.) +type GetEntryAndProofResponse struct { + LeafInput []byte `json:"leaf_input"` // the entry itself + ExtraData []byte `json:"extra_data"` // any chain provided when the entry was added to the log + AuditPath [][]byte `json:"audit_path"` // the corresponding proof +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/README.md b/vendor/github.com/google/certificate-transparency-go/x509/README.md new file mode 100644 index 00000000000..6f22f5f8344 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/README.md @@ -0,0 +1,7 @@ +# Important Notice + +This is a fork of the `crypto/x509` Go package. The original source can be found on +[GitHub](https://github.com/golang/go). + +Be careful about making local modifications to this code as it will +make maintenance harder in future. diff --git a/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go b/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go new file mode 100644 index 00000000000..4823d594633 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go @@ -0,0 +1,159 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "encoding/pem" + "errors" + "runtime" +) + +// CertPool is a set of certificates. +type CertPool struct { + bySubjectKeyId map[string][]int + byName map[string][]int + certs []*Certificate +} + +// NewCertPool returns a new, empty CertPool. +func NewCertPool() *CertPool { + return &CertPool{ + bySubjectKeyId: make(map[string][]int), + byName: make(map[string][]int), + } +} + +func (s *CertPool) copy() *CertPool { + p := &CertPool{ + bySubjectKeyId: make(map[string][]int, len(s.bySubjectKeyId)), + byName: make(map[string][]int, len(s.byName)), + certs: make([]*Certificate, len(s.certs)), + } + for k, v := range s.bySubjectKeyId { + indexes := make([]int, len(v)) + copy(indexes, v) + p.bySubjectKeyId[k] = indexes + } + for k, v := range s.byName { + indexes := make([]int, len(v)) + copy(indexes, v) + p.byName[k] = indexes + } + copy(p.certs, s.certs) + return p +} + +// SystemCertPool returns a copy of the system cert pool. +// +// Any mutations to the returned pool are not written to disk and do +// not affect any other pool returned by SystemCertPool. 
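+//
+// A sketch of the common pattern of extending the system roots (the file
+// path is hypothetical):
+//
+//	pool, err := SystemCertPool()
+//	if err != nil {
+//		pool = NewCertPool()
+//	}
+//	pemBytes, _ := os.ReadFile("/etc/ssl/extra-ca.pem")
+//	pool.AppendCertsFromPEM(pemBytes)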
+// +// New changes in the system cert pool might not be reflected +// in subsequent calls. +func SystemCertPool() (*CertPool, error) { + if runtime.GOOS == "windows" { + // Issue 16736, 18609: + return nil, errors.New("crypto/x509: system root pool is not available on Windows") + } + + if sysRoots := systemRootsPool(); sysRoots != nil { + return sysRoots.copy(), nil + } + + return loadSystemRoots() +} + +// findPotentialParents returns the indexes of certificates in s which might +// have signed cert. The caller must not modify the returned slice. +func (s *CertPool) findPotentialParents(cert *Certificate) []int { + if s == nil { + return nil + } + + var candidates []int + if len(cert.AuthorityKeyId) > 0 { + candidates = s.bySubjectKeyId[string(cert.AuthorityKeyId)] + } + if len(candidates) == 0 { + candidates = s.byName[string(cert.RawIssuer)] + } + return candidates +} + +func (s *CertPool) contains(cert *Certificate) bool { + if s == nil { + return false + } + + candidates := s.byName[string(cert.RawSubject)] + for _, c := range candidates { + if s.certs[c].Equal(cert) { + return true + } + } + + return false +} + +// AddCert adds a certificate to a pool. +func (s *CertPool) AddCert(cert *Certificate) { + if cert == nil { + panic("adding nil Certificate to CertPool") + } + + // Check that the certificate isn't being added twice. + if s.contains(cert) { + return + } + + n := len(s.certs) + s.certs = append(s.certs, cert) + + if len(cert.SubjectKeyId) > 0 { + keyId := string(cert.SubjectKeyId) + s.bySubjectKeyId[keyId] = append(s.bySubjectKeyId[keyId], n) + } + name := string(cert.RawSubject) + s.byName[name] = append(s.byName[name], n) +} + +// AppendCertsFromPEM attempts to parse a series of PEM encoded certificates. +// It appends any certificates found to s and reports whether any certificates +// were successfully parsed. +// +// On many Linux systems, /etc/ssl/cert.pem will contain the system wide set +// of root CAs in a format suitable for this function. +func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) { + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + + cert, err := ParseCertificate(block.Bytes) + if IsFatal(err) { + continue + } + + s.AddCert(cert) + ok = true + } + + return +} + +// Subjects returns a list of the DER-encoded subjects of +// all of the certificates in the pool. +func (s *CertPool) Subjects() [][]byte { + res := make([][]byte, len(s.certs)) + for i, c := range s.certs { + res[i] = c.RawSubject + } + return res +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/curves.go b/vendor/github.com/google/certificate-transparency-go/x509/curves.go new file mode 100644 index 00000000000..0e2778cb353 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/curves.go @@ -0,0 +1,37 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +// This file holds ECC curves that are not supported by the main Go crypto/elliptic +// library, but which have been observed in certificates in the wild. 
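+//
+// The parameters are built lazily: the first accessor call runs
+// initAllCurves exactly once via the sync.Once below. A sketch of how
+// code inside this package might use an accessor (x and y being
+// hypothetical point coordinates):
+//
+//	curve := secp192r1()
+//	ok := curve.IsOnCurve(x, y)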
+ +var initonce sync.Once +var p192r1 *elliptic.CurveParams + +func initAllCurves() { + initSECP192R1() +} + +func initSECP192R1() { + // See SEC-2, section 2.2.2 + p192r1 = &elliptic.CurveParams{Name: "P-192"} + p192r1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", 16) + p192r1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831", 16) + p192r1.B, _ = new(big.Int).SetString("64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1", 16) + p192r1.Gx, _ = new(big.Int).SetString("188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012", 16) + p192r1.Gy, _ = new(big.Int).SetString("07192B95FFC8DA78631011ED6B24CDD573F977A11E794811", 16) + p192r1.BitSize = 192 +} + +func secp192r1() elliptic.Curve { + initonce.Do(initAllCurves) + return p192r1 +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/error.go b/vendor/github.com/google/certificate-transparency-go/x509/error.go new file mode 100644 index 00000000000..40b7ef7d9fe --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/error.go @@ -0,0 +1,236 @@ +package x509 + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +// Error implements the error interface and describes a single error in an X.509 certificate or CRL. +type Error struct { + ID ErrorID + Category ErrCategory + Summary string + Field string + SpecRef string + SpecText string + // Fatal indicates that parsing has been aborted. + Fatal bool +} + +func (err Error) Error() string { + var msg bytes.Buffer + if err.ID != ErrInvalidID { + if err.Fatal { + msg.WriteRune('E') + } else { + msg.WriteRune('W') + } + msg.WriteString(fmt.Sprintf("%03d: ", err.ID)) + } + msg.WriteString(err.Summary) + return msg.String() +} + +// VerboseError creates a more verbose error string, including spec details. +func (err Error) VerboseError() string { + var msg bytes.Buffer + msg.WriteString(err.Error()) + if len(err.Field) > 0 || err.Category != UnknownCategory || len(err.SpecRef) > 0 || len(err.SpecText) > 0 { + msg.WriteString(" (") + needSep := false + if len(err.Field) > 0 { + msg.WriteString(err.Field) + needSep = true + } + if err.Category != UnknownCategory { + if needSep { + msg.WriteString(": ") + } + msg.WriteString(err.Category.String()) + needSep = true + } + if len(err.SpecRef) > 0 { + if needSep { + msg.WriteString(": ") + } + msg.WriteString(err.SpecRef) + needSep = true + } + if len(err.SpecText) > 0 { + if needSep { + if len(err.SpecRef) > 0 { + msg.WriteString(", ") + } else { + msg.WriteString(": ") + } + } + msg.WriteString("'") + msg.WriteString(err.SpecText) + msg.WriteString("'") + } + msg.WriteString(")") + } + + return msg.String() +} + +// ErrCategory indicates the category of an x509.Error. +type ErrCategory int + +// ErrCategory values. 
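+// The ASN.1 groups cover bytes that fail to parse at all; the X.509 and
+// CA/Browser Forum groups cover content that parses but violates a spec
+// clause. Only errors marked Fatal abort parsing.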
+const ( + UnknownCategory ErrCategory = iota + // Errors in ASN.1 encoding + InvalidASN1Encoding + InvalidASN1Content + InvalidASN1DER + // Errors in ASN.1 relative to schema + InvalidValueRange + InvalidASN1Type + UnexpectedAdditionalData + // Errors in X.509 + PoorlyFormedCertificate // Fails a SHOULD clause + MalformedCertificate // Fails a MUST clause + PoorlyFormedCRL // Fails a SHOULD clause + MalformedCRL // Fails a MUST clause + // Errors relative to CA/Browser Forum guidelines + BaselineRequirementsFailure + EVRequirementsFailure + // Other errors + InsecureAlgorithm + UnrecognizedValue +) + +func (category ErrCategory) String() string { + switch category { + case InvalidASN1Encoding: + return "Invalid ASN.1 encoding" + case InvalidASN1Content: + return "Invalid ASN.1 content" + case InvalidASN1DER: + return "Invalid ASN.1 distinguished encoding" + case InvalidValueRange: + return "Invalid value for range given in schema" + case InvalidASN1Type: + return "Invalid ASN.1 type for schema" + case UnexpectedAdditionalData: + return "Unexpected additional data present" + case PoorlyFormedCertificate: + return "Certificate does not comply with SHOULD clause in spec" + case MalformedCertificate: + return "Certificate does not comply with MUST clause in spec" + case PoorlyFormedCRL: + return "Certificate Revocation List does not comply with SHOULD clause in spec" + case MalformedCRL: + return "Certificate Revocation List does not comply with MUST clause in spec" + case BaselineRequirementsFailure: + return "Certificate does not comply with CA/BF baseline requirements" + case EVRequirementsFailure: + return "Certificate does not comply with CA/BF EV requirements" + case InsecureAlgorithm: + return "Certificate uses an insecure algorithm" + case UnrecognizedValue: + return "Certificate uses an unrecognized value" + default: + return fmt.Sprintf("Unknown (%d)", category) + } +} + +// ErrorID is an identifier for an x509.Error, to allow filtering. +type ErrorID int + +// Errors implements the error interface and holds a collection of errors found in a certificate or CRL. +type Errors struct { + Errs []Error +} + +// Error converts to a string. +func (e *Errors) Error() string { + return e.combineErrors(Error.Error) +} + +// VerboseError creates a more verbose error string, including spec details. +func (e *Errors) VerboseError() string { + return e.combineErrors(Error.VerboseError) +} + +// Fatal indicates whether e includes a fatal error +func (e *Errors) Fatal() bool { + return (e.FirstFatal() != nil) +} + +// Empty indicates whether e has no errors. +func (e *Errors) Empty() bool { + if e == nil { + return true + } + return len(e.Errs) == 0 +} + +// FirstFatal returns the first fatal error in e, or nil +// if there is no fatal error. +func (e *Errors) FirstFatal() error { + if e == nil { + return nil + } + for _, err := range e.Errs { + if err.Fatal { + return err + } + } + return nil + +} + +// AddID adds the Error identified by the given id to an x509.Errors. 
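+//
+// A typical pattern while parsing (sketch; ext is a hypothetical
+// pkix.Extension encountered by the caller):
+//
+//	var errs Errors
+//	errs.AddID(ErrUnhandledCriticalCertListExtension, ext.Id)
+//	if errs.Fatal() {
+//		// abandon parsing
+//	}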
+func (e *Errors) AddID(id ErrorID, args ...interface{}) {
+	e.Errs = append(e.Errs, NewError(id, args...))
+}
+
+func (e Errors) combineErrors(errfn func(Error) string) string {
+	if len(e.Errs) == 0 {
+		return ""
+	}
+	if len(e.Errs) == 1 {
+		return errfn(e.Errs[0])
+	}
+	var msg bytes.Buffer
+	msg.WriteString("Errors:")
+	for _, err := range e.Errs {
+		msg.WriteString("\n  ")
+		msg.WriteString(errfn(err))
+	}
+	return msg.String()
+}
+
+// Filter creates a new Errors object with any entries from the filtered
+// list of IDs removed.
+func (e Errors) Filter(filtered []ErrorID) Errors {
+	var results Errors
+eloop:
+	for _, v := range e.Errs {
+		for _, f := range filtered {
+			if v.ID == f {
+				// Skip this entry but keep scanning the rest; a plain
+				// break here would wrongly drop all subsequent entries.
+				continue eloop
+			}
+		}
+		results.Errs = append(results.Errs, v)
+	}
+	return results
+}
+
+// ErrorFilter builds a list of error IDs (suitable for use with Errors.Filter) from a comma-separated string.
+func ErrorFilter(ignore string) []ErrorID {
+	var ids []ErrorID
+	filters := strings.Split(ignore, ",")
+	for _, f := range filters {
+		v, err := strconv.Atoi(f)
+		if err != nil {
+			continue
+		}
+		ids = append(ids, ErrorID(v))
+	}
+	return ids
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/errors.go b/vendor/github.com/google/certificate-transparency-go/x509/errors.go
new file mode 100644
index 00000000000..ec2fe06a99f
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/errors.go
@@ -0,0 +1,302 @@
+package x509
+
+import "fmt"
+
+// To preserve error IDs, only append to this list, never insert.
+const (
+	ErrInvalidID ErrorID = iota
+	ErrInvalidCertList
+	ErrTrailingCertList
+	ErrUnexpectedlyCriticalCertListExtension
+	ErrUnexpectedlyNonCriticalCertListExtension
+	ErrInvalidCertListAuthKeyID
+	ErrTrailingCertListAuthKeyID
+	ErrInvalidCertListIssuerAltName
+	ErrInvalidCertListCRLNumber
+	ErrTrailingCertListCRLNumber
+	ErrNegativeCertListCRLNumber
+	ErrInvalidCertListDeltaCRL
+	ErrTrailingCertListDeltaCRL
+	ErrNegativeCertListDeltaCRL
+	ErrInvalidCertListIssuingDP
+	ErrTrailingCertListIssuingDP
+	ErrCertListIssuingDPMultipleTypes
+	ErrCertListIssuingDPInvalidFullName
+	ErrInvalidCertListFreshestCRL
+	ErrInvalidCertListAuthInfoAccess
+	ErrTrailingCertListAuthInfoAccess
+	ErrUnhandledCriticalCertListExtension
+	ErrUnexpectedlyCriticalRevokedCertExtension
+	ErrUnexpectedlyNonCriticalRevokedCertExtension
+	ErrInvalidRevocationReason
+	ErrTrailingRevocationReason
+	ErrInvalidRevocationInvalidityDate
+	ErrTrailingRevocationInvalidityDate
+	ErrInvalidRevocationIssuer
+	ErrUnhandledCriticalRevokedCertExtension
+
+	ErrMaxID
+)
+
+// idToError gives a template x509.Error for each defined ErrorID; the Summary
+// field may hold format specifiers that are filled from NewError's arguments.
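+//
+// NewError (at the bottom of this file) instantiates a template, e.g.
+// (sketch):
+//
+//	err := NewError(ErrTrailingCertList)
+//	_ = err.Error() // "E002: x509: trailing data after CertificateList"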
+var idToError map[ErrorID]Error + +var errorInfo = []Error{ + { + ID: ErrInvalidCertList, + Summary: "x509: failed to parse CertificateList: %v", + Field: "CertificateList", + SpecRef: "RFC 5280 s5.1", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingCertList, + Summary: "x509: trailing data after CertificateList", + Field: "CertificateList", + SpecRef: "RFC 5280 s5.1", + Category: InvalidASN1Content, + Fatal: true, + }, + + { + ID: ErrUnexpectedlyCriticalCertListExtension, + Summary: "x509: certificate list extension %v marked critical but expected to be non-critical", + Field: "tbsCertList.crlExtensions.*.critical", + SpecRef: "RFC 5280 s5.2", + Category: MalformedCRL, + }, + { + ID: ErrUnexpectedlyNonCriticalCertListExtension, + Summary: "x509: certificate list extension %v marked non-critical but expected to be critical", + Field: "tbsCertList.crlExtensions.*.critical", + SpecRef: "RFC 5280 s5.2", + Category: MalformedCRL, + }, + + { + ID: ErrInvalidCertListAuthKeyID, + Summary: "x509: failed to unmarshal certificate-list authority key-id: %v", + Field: "tbsCertList.crlExtensions.*.AuthorityKeyIdentifier", + SpecRef: "RFC 5280 s5.2.1", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingCertListAuthKeyID, + Summary: "x509: trailing data after certificate list auth key ID", + Field: "tbsCertList.crlExtensions.*.AuthorityKeyIdentifier", + SpecRef: "RFC 5280 s5.2.1", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrInvalidCertListIssuerAltName, + Summary: "x509: failed to parse CRL issuer alt name: %v", + Field: "tbsCertList.crlExtensions.*.IssuerAltName", + SpecRef: "RFC 5280 s5.2.2", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrInvalidCertListCRLNumber, + Summary: "x509: failed to unmarshal certificate-list crl-number: %v", + Field: "tbsCertList.crlExtensions.*.CRLNumber", + SpecRef: "RFC 5280 s5.2.3", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingCertListCRLNumber, + Summary: "x509: trailing data after certificate list crl-number", + Field: "tbsCertList.crlExtensions.*.CRLNumber", + SpecRef: "RFC 5280 s5.2.3", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrNegativeCertListCRLNumber, + Summary: "x509: negative certificate list crl-number: %d", + Field: "tbsCertList.crlExtensions.*.CRLNumber", + SpecRef: "RFC 5280 s5.2.3", + Category: MalformedCRL, + Fatal: true, + }, + { + ID: ErrInvalidCertListDeltaCRL, + Summary: "x509: failed to unmarshal certificate-list delta-crl: %v", + Field: "tbsCertList.crlExtensions.*.BaseCRLNumber", + SpecRef: "RFC 5280 s5.2.4", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingCertListDeltaCRL, + Summary: "x509: trailing data after certificate list delta-crl", + Field: "tbsCertList.crlExtensions.*.BaseCRLNumber", + SpecRef: "RFC 5280 s5.2.4", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrNegativeCertListDeltaCRL, + Summary: "x509: negative certificate list base-crl-number: %d", + Field: "tbsCertList.crlExtensions.*.BaseCRLNumber", + SpecRef: "RFC 5280 s5.2.4", + Category: MalformedCRL, + Fatal: true, + }, + { + ID: ErrInvalidCertListIssuingDP, + Summary: "x509: failed to unmarshal certificate list issuing distribution point: %v", + Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint", + SpecRef: "RFC 5280 s5.2.5", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingCertListIssuingDP, + Summary: "x509: trailing data after certificate list issuing distribution 
point", + Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint", + SpecRef: "RFC 5280 s5.2.5", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrCertListIssuingDPMultipleTypes, + Summary: "x509: multiple cert types set in issuing-distribution-point: user:%v CA:%v attr:%v", + Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint", + SpecRef: "RFC 5280 s5.2.5", + SpecText: "at most one of onlyContainsUserCerts, onlyContainsCACerts, and onlyContainsAttributeCerts may be set to TRUE.", + Category: MalformedCRL, + Fatal: true, + }, + { + ID: ErrCertListIssuingDPInvalidFullName, + Summary: "x509: failed to parse CRL issuing-distribution-point fullName: %v", + Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint.distributionPoint", + SpecRef: "RFC 5280 s5.2.5", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrInvalidCertListFreshestCRL, + Summary: "x509: failed to unmarshal certificate list freshestCRL: %v", + Field: "tbsCertList.crlExtensions.*.FreshestCRL", + SpecRef: "RFC 5280 s5.2.6", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrInvalidCertListAuthInfoAccess, + Summary: "x509: failed to unmarshal certificate list authority info access: %v", + Field: "tbsCertList.crlExtensions.*.AuthorityInfoAccess", + SpecRef: "RFC 5280 s5.2.7", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingCertListAuthInfoAccess, + Summary: "x509: trailing data after certificate list authority info access", + Field: "tbsCertList.crlExtensions.*.AuthorityInfoAccess", + SpecRef: "RFC 5280 s5.2.7", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrUnhandledCriticalCertListExtension, + Summary: "x509: unhandled critical extension in certificate list: %v", + Field: "tbsCertList.revokedCertificates.crlExtensions.*", + SpecRef: "RFC 5280 s5.2", + SpecText: "If a CRL contains a critical extension that the application cannot process, then the application MUST NOT use that CRL to determine the status of certificates.", + Category: MalformedCRL, + Fatal: true, + }, + + { + ID: ErrUnexpectedlyCriticalRevokedCertExtension, + Summary: "x509: revoked certificate extension %v marked critical but expected to be non-critical", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.critical", + SpecRef: "RFC 5280 s5.3", + Category: MalformedCRL, + }, + { + ID: ErrUnexpectedlyNonCriticalRevokedCertExtension, + Summary: "x509: revoked certificate extension %v marked non-critical but expected to be critical", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.critical", + SpecRef: "RFC 5280 s5.3", + Category: MalformedCRL, + }, + + { + ID: ErrInvalidRevocationReason, + Summary: "x509: failed to parse revocation reason: %v", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CRLReason", + SpecRef: "RFC 5280 s5.3.1", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingRevocationReason, + Summary: "x509: trailing data after revoked certificate reason", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CRLReason", + SpecRef: "RFC 5280 s5.3.1", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrInvalidRevocationInvalidityDate, + Summary: "x509: failed to parse revoked certificate invalidity date: %v", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.InvalidityDate", + SpecRef: "RFC 5280 s5.3.2", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrTrailingRevocationInvalidityDate, + Summary: "x509: trailing data after revoked 
certificate invalidity date", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.InvalidityDate", + SpecRef: "RFC 5280 s5.3.2", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrInvalidRevocationIssuer, + Summary: "x509: failed to parse revocation issuer %v", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CertificateIssuer", + SpecRef: "RFC 5280 s5.3.3", + Category: InvalidASN1Content, + Fatal: true, + }, + { + ID: ErrUnhandledCriticalRevokedCertExtension, + Summary: "x509: unhandled critical extension in revoked certificate: %v", + Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*", + SpecRef: "RFC 5280 s5.3", + SpecText: "If a CRL contains a critical CRL entry extension that the application cannot process, then the application MUST NOT use that CRL to determine the status of any certificates.", + Category: MalformedCRL, + Fatal: true, + }, +} + +func init() { + idToError = make(map[ErrorID]Error, len(errorInfo)) + for _, info := range errorInfo { + idToError[info.ID] = info + } +} + +// NewError builds a new x509.Error based on the template for the given id. +func NewError(id ErrorID, args ...interface{}) Error { + var err Error + if id >= ErrMaxID { + err.ID = id + err.Summary = fmt.Sprintf("Unknown error ID %v: args %+v", id, args) + err.Fatal = true + } else { + err = idToError[id] + err.Summary = fmt.Sprintf(err.Summary, args...) + } + return err +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/names.go b/vendor/github.com/google/certificate-transparency-go/x509/names.go new file mode 100644 index 00000000000..4829edeb04b --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/names.go @@ -0,0 +1,165 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "fmt" + "net" + + "github.com/google/certificate-transparency-go/asn1" + "github.com/google/certificate-transparency-go/x509/pkix" +) + +const ( + // GeneralName tag values from RFC 5280, 4.2.1.6 + tagOtherName = 0 + tagRFC822Name = 1 + tagDNSName = 2 + tagX400Address = 3 + tagDirectoryName = 4 + tagEDIPartyName = 5 + tagURI = 6 + tagIPAddress = 7 + tagRegisteredID = 8 +) + +// OtherName describes a name related to a certificate which is not in one +// of the standard name formats. RFC 5280, 4.2.1.6: +// +// OtherName ::= SEQUENCE { +// type-id OBJECT IDENTIFIER, +// value [0] EXPLICIT ANY DEFINED BY type-id } +type OtherName struct { + TypeID asn1.ObjectIdentifier + Value asn1.RawValue +} + +// GeneralNames holds a collection of names related to a certificate. +type GeneralNames struct { + DNSNames []string + EmailAddresses []string + DirectoryNames []pkix.Name + URIs []string + IPNets []net.IPNet + RegisteredIDs []asn1.ObjectIdentifier + OtherNames []OtherName +} + +// Len returns the total number of names in a GeneralNames object. +func (gn GeneralNames) Len() int { + return (len(gn.DNSNames) + len(gn.EmailAddresses) + len(gn.DirectoryNames) + + len(gn.URIs) + len(gn.IPNets) + len(gn.RegisteredIDs) + len(gn.OtherNames)) +} + +// Empty indicates whether a GeneralNames object is empty. 
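+//
+// Parsers in this file fill a GeneralNames in place; a typical in-package
+// caller (sketch, with extValue holding hypothetical DER from a SAN-style
+// extension) looks like:
+//
+//	var gn GeneralNames
+//	if err := parseGeneralNames(extValue, &gn); err != nil {
+//		return err
+//	}
+//	if !gn.Empty() {
+//		// inspect gn.DNSNames, gn.IPNets, ...
+//	}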
+func (gn GeneralNames) Empty() bool { + return gn.Len() == 0 +} + +func parseGeneralNames(value []byte, gname *GeneralNames) error { + // RFC 5280, 4.2.1.6 + // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName + // + // GeneralName ::= CHOICE { + // otherName [0] OtherName, + // rfc822Name [1] IA5String, + // dNSName [2] IA5String, + // x400Address [3] ORAddress, + // directoryName [4] Name, + // ediPartyName [5] EDIPartyName, + // uniformResourceIdentifier [6] IA5String, + // iPAddress [7] OCTET STRING, + // registeredID [8] OBJECT IDENTIFIER } + var seq asn1.RawValue + var rest []byte + if rest, err := asn1.Unmarshal(value, &seq); err != nil { + return fmt.Errorf("x509: failed to parse GeneralNames: %v", err) + } else if len(rest) != 0 { + return fmt.Errorf("x509: trailing data after GeneralNames") + } + if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal { + return fmt.Errorf("x509: failed to parse GeneralNames sequence, tag %+v", seq) + } + + rest = seq.Bytes + for len(rest) > 0 { + var err error + rest, err = parseGeneralName(rest, gname, false) + if err != nil { + return fmt.Errorf("x509: failed to parse GeneralName: %v", err) + } + } + return nil +} + +func parseGeneralName(data []byte, gname *GeneralNames, withMask bool) ([]byte, error) { + var v asn1.RawValue + var rest []byte + var err error + rest, err = asn1.Unmarshal(data, &v) + if err != nil { + return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames: %v", err) + } + switch v.Tag { + case tagOtherName: + if !v.IsCompound { + return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.otherName: not compound") + } + var other OtherName + v.FullBytes = append([]byte{}, v.FullBytes...) + v.FullBytes[0] = asn1.TagSequence | 0x20 + _, err = asn1.Unmarshal(v.FullBytes, &other) + if err != nil { + return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.otherName: %v", err) + } + gname.OtherNames = append(gname.OtherNames, other) + case tagRFC822Name: + gname.EmailAddresses = append(gname.EmailAddresses, string(v.Bytes)) + case tagDNSName: + dns := string(v.Bytes) + gname.DNSNames = append(gname.DNSNames, dns) + case tagDirectoryName: + var rdnSeq pkix.RDNSequence + if _, err := asn1.Unmarshal(v.Bytes, &rdnSeq); err != nil { + return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.directoryName: %v", err) + } + var dirName pkix.Name + dirName.FillFromRDNSequence(&rdnSeq) + gname.DirectoryNames = append(gname.DirectoryNames, dirName) + case tagURI: + gname.URIs = append(gname.URIs, string(v.Bytes)) + case tagIPAddress: + vlen := len(v.Bytes) + if withMask { + switch vlen { + case (2 * net.IPv4len), (2 * net.IPv6len): + ipNet := net.IPNet{IP: v.Bytes[0 : vlen/2], Mask: v.Bytes[vlen/2:]} + gname.IPNets = append(gname.IPNets, ipNet) + default: + return nil, fmt.Errorf("x509: invalid IP/mask length %d in GeneralNames.iPAddress", vlen) + } + } else { + switch vlen { + case net.IPv4len, net.IPv6len: + ipNet := net.IPNet{IP: v.Bytes} + gname.IPNets = append(gname.IPNets, ipNet) + default: + return nil, fmt.Errorf("x509: invalid IP length %d in GeneralNames.iPAddress", vlen) + } + } + case tagRegisteredID: + var oid asn1.ObjectIdentifier + v.FullBytes = append([]byte{}, v.FullBytes...) 
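+		// The CHOICE context-specific tag [8] was copied above; overwrite
+		// it with the universal OBJECT IDENTIFIER tag so the generic asn1
+		// unmarshaler accepts the value.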
+ v.FullBytes[0] = asn1.TagOID + _, err = asn1.Unmarshal(v.FullBytes, &oid) + if err != nil { + return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.registeredID: %v", err) + } + gname.RegisteredIDs = append(gname.RegisteredIDs, oid) + default: + return nil, fmt.Errorf("x509: failed to unmarshal GeneralName: unknown tag %d", v.Tag) + } + return rest, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go b/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go new file mode 100644 index 00000000000..93d1e4a922d --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go @@ -0,0 +1,240 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +// RFC 1423 describes the encryption of PEM blocks. The algorithm used to +// generate a key from the password was derived by looking at the OpenSSL +// implementation. + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/md5" + "encoding/hex" + "encoding/pem" + "errors" + "io" + "strings" +) + +type PEMCipher int + +// Possible values for the EncryptPEMBlock encryption algorithm. +const ( + _ PEMCipher = iota + PEMCipherDES + PEMCipher3DES + PEMCipherAES128 + PEMCipherAES192 + PEMCipherAES256 +) + +// rfc1423Algo holds a method for enciphering a PEM block. +type rfc1423Algo struct { + cipher PEMCipher + name string + cipherFunc func(key []byte) (cipher.Block, error) + keySize int + blockSize int +} + +// rfc1423Algos holds a slice of the possible ways to encrypt a PEM +// block. The ivSize numbers were taken from the OpenSSL source. +var rfc1423Algos = []rfc1423Algo{{ + cipher: PEMCipherDES, + name: "DES-CBC", + cipherFunc: des.NewCipher, + keySize: 8, + blockSize: des.BlockSize, +}, { + cipher: PEMCipher3DES, + name: "DES-EDE3-CBC", + cipherFunc: des.NewTripleDESCipher, + keySize: 24, + blockSize: des.BlockSize, +}, { + cipher: PEMCipherAES128, + name: "AES-128-CBC", + cipherFunc: aes.NewCipher, + keySize: 16, + blockSize: aes.BlockSize, +}, { + cipher: PEMCipherAES192, + name: "AES-192-CBC", + cipherFunc: aes.NewCipher, + keySize: 24, + blockSize: aes.BlockSize, +}, { + cipher: PEMCipherAES256, + name: "AES-256-CBC", + cipherFunc: aes.NewCipher, + keySize: 32, + blockSize: aes.BlockSize, +}, +} + +// deriveKey uses a key derivation function to stretch the password into a key +// with the number of bits our cipher requires. This algorithm was derived from +// the OpenSSL source. +func (c rfc1423Algo) deriveKey(password, salt []byte) []byte { + hash := md5.New() + out := make([]byte, c.keySize) + var digest []byte + + for i := 0; i < len(out); i += len(digest) { + hash.Reset() + hash.Write(digest) + hash.Write(password) + hash.Write(salt) + digest = hash.Sum(digest[:0]) + copy(out[i:], digest) + } + return out +} + +// IsEncryptedPEMBlock returns if the PEM block is password encrypted. +func IsEncryptedPEMBlock(b *pem.Block) bool { + _, ok := b.Headers["DEK-Info"] + return ok +} + +// IncorrectPasswordError is returned when an incorrect password is detected. +var IncorrectPasswordError = errors.New("x509: decryption password incorrect") + +// DecryptPEMBlock takes a password encrypted PEM block and the password used to +// encrypt it and returns a slice of decrypted DER encoded bytes. It inspects +// the DEK-Info header to determine the algorithm used for decryption. 
If no +// DEK-Info header is present, an error is returned. If an incorrect password +// is detected an IncorrectPasswordError is returned. Because of deficiencies +// in the encrypted-PEM format, it's not always possible to detect an incorrect +// password. In these cases no error will be returned but the decrypted DER +// bytes will be random noise. +func DecryptPEMBlock(b *pem.Block, password []byte) ([]byte, error) { + dek, ok := b.Headers["DEK-Info"] + if !ok { + return nil, errors.New("x509: no DEK-Info header in block") + } + + idx := strings.Index(dek, ",") + if idx == -1 { + return nil, errors.New("x509: malformed DEK-Info header") + } + + mode, hexIV := dek[:idx], dek[idx+1:] + ciph := cipherByName(mode) + if ciph == nil { + return nil, errors.New("x509: unknown encryption mode") + } + iv, err := hex.DecodeString(hexIV) + if err != nil { + return nil, err + } + if len(iv) != ciph.blockSize { + return nil, errors.New("x509: incorrect IV size") + } + + // Based on the OpenSSL implementation. The salt is the first 8 bytes + // of the initialization vector. + key := ciph.deriveKey(password, iv[:8]) + block, err := ciph.cipherFunc(key) + if err != nil { + return nil, err + } + + if len(b.Bytes)%block.BlockSize() != 0 { + return nil, errors.New("x509: encrypted PEM data is not a multiple of the block size") + } + + data := make([]byte, len(b.Bytes)) + dec := cipher.NewCBCDecrypter(block, iv) + dec.CryptBlocks(data, b.Bytes) + + // Blocks are padded using a scheme where the last n bytes of padding are all + // equal to n. It can pad from 1 to blocksize bytes inclusive. See RFC 1423. + // For example: + // [x y z 2 2] + // [x y 7 7 7 7 7 7 7] + // If we detect a bad padding, we assume it is an invalid password. + dlen := len(data) + if dlen == 0 || dlen%ciph.blockSize != 0 { + return nil, errors.New("x509: invalid padding") + } + last := int(data[dlen-1]) + if dlen < last { + return nil, IncorrectPasswordError + } + if last == 0 || last > ciph.blockSize { + return nil, IncorrectPasswordError + } + for _, val := range data[dlen-last:] { + if int(val) != last { + return nil, IncorrectPasswordError + } + } + return data[:dlen-last], nil +} + +// EncryptPEMBlock returns a PEM block of the specified type holding the +// given DER-encoded data encrypted with the specified algorithm and +// password. +func EncryptPEMBlock(rand io.Reader, blockType string, data, password []byte, alg PEMCipher) (*pem.Block, error) { + ciph := cipherByKey(alg) + if ciph == nil { + return nil, errors.New("x509: unknown encryption mode") + } + iv := make([]byte, ciph.blockSize) + if _, err := io.ReadFull(rand, iv); err != nil { + return nil, errors.New("x509: cannot generate IV: " + err.Error()) + } + // The salt is the first 8 bytes of the initialization vector, + // matching the key derivation in DecryptPEMBlock. + key := ciph.deriveKey(password, iv[:8]) + block, err := ciph.cipherFunc(key) + if err != nil { + return nil, err + } + enc := cipher.NewCBCEncrypter(block, iv) + pad := ciph.blockSize - len(data)%ciph.blockSize + encrypted := make([]byte, len(data), len(data)+pad) + // We could save this copy by encrypting all the whole blocks in + // the data separately, but it doesn't seem worth the additional + // code. + copy(encrypted, data) + // See RFC 1423, Section 1.1. 
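+	// Each pad byte holds the pad length itself, and the appends stay
+	// within the capacity reserved above, so CryptBlocks below can
+	// encrypt the padded buffer in place.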
+ for i := 0; i < pad; i++ { + encrypted = append(encrypted, byte(pad)) + } + enc.CryptBlocks(encrypted, encrypted) + + return &pem.Block{ + Type: blockType, + Headers: map[string]string{ + "Proc-Type": "4,ENCRYPTED", + "DEK-Info": ciph.name + "," + hex.EncodeToString(iv), + }, + Bytes: encrypted, + }, nil +} + +func cipherByName(name string) *rfc1423Algo { + for i := range rfc1423Algos { + alg := &rfc1423Algos[i] + if alg.name == name { + return alg + } + } + return nil +} + +func cipherByKey(key PEMCipher) *rfc1423Algo { + for i := range rfc1423Algos { + alg := &rfc1423Algos[i] + if alg.cipher == key { + return alg + } + } + return nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go b/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go new file mode 100644 index 00000000000..bea05b57fd8 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go @@ -0,0 +1,174 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "crypto/rsa" + "errors" + "math/big" + + "github.com/google/certificate-transparency-go/asn1" +) + +// pkcs1PrivateKey is a structure which mirrors the PKCS#1 ASN.1 for an RSA private key. +type pkcs1PrivateKey struct { + Version int + N *big.Int + E int + D *big.Int + P *big.Int + Q *big.Int + // We ignore these values, if present, because rsa will calculate them. + Dp *big.Int `asn1:"optional"` + Dq *big.Int `asn1:"optional"` + Qinv *big.Int `asn1:"optional"` + + AdditionalPrimes []pkcs1AdditionalRSAPrime `asn1:"optional,omitempty"` +} + +type pkcs1AdditionalRSAPrime struct { + Prime *big.Int + + // We ignore these values because rsa will calculate them. + Exp *big.Int + Coeff *big.Int +} + +// pkcs1PublicKey reflects the ASN.1 structure of a PKCS#1 public key. +type pkcs1PublicKey struct { + N *big.Int + E int +} + +// ParsePKCS1PrivateKey parses an RSA private key in PKCS#1, ASN.1 DER form. +// +// This kind of key is commonly encoded in PEM blocks of type "RSA PRIVATE KEY". +func ParsePKCS1PrivateKey(der []byte) (*rsa.PrivateKey, error) { + var priv pkcs1PrivateKey + rest, err := asn1.Unmarshal(der, &priv) + if len(rest) > 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + if err != nil { + if _, err := asn1.Unmarshal(der, &ecPrivateKey{}); err == nil { + return nil, errors.New("x509: failed to parse private key (use ParseECPrivateKey instead for this key format)") + } + if _, err := asn1.Unmarshal(der, &pkcs8{}); err == nil { + return nil, errors.New("x509: failed to parse private key (use ParsePKCS8PrivateKey instead for this key format)") + } + return nil, err + } + + if priv.Version > 1 { + return nil, errors.New("x509: unsupported private key version") + } + + if priv.N.Sign() <= 0 || priv.D.Sign() <= 0 || priv.P.Sign() <= 0 || priv.Q.Sign() <= 0 { + return nil, errors.New("x509: private key contains zero or negative value") + } + + key := new(rsa.PrivateKey) + key.PublicKey = rsa.PublicKey{ + E: priv.E, + N: priv.N, + } + + key.D = priv.D + key.Primes = make([]*big.Int, 2+len(priv.AdditionalPrimes)) + key.Primes[0] = priv.P + key.Primes[1] = priv.Q + for i, a := range priv.AdditionalPrimes { + if a.Prime.Sign() <= 0 { + return nil, errors.New("x509: private key contains zero or negative prime") + } + key.Primes[i+2] = a.Prime + // We ignore the other two values because rsa will calculate + // them as needed. 
+ } + + err = key.Validate() + if err != nil { + return nil, err + } + key.Precompute() + + return key, nil +} + +// MarshalPKCS1PrivateKey converts an RSA private key to PKCS#1, ASN.1 DER form. +// +// This kind of key is commonly encoded in PEM blocks of type "RSA PRIVATE KEY". +// For a more flexible key format which is not RSA specific, use +// MarshalPKCS8PrivateKey. +func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte { + key.Precompute() + + version := 0 + if len(key.Primes) > 2 { + version = 1 + } + + priv := pkcs1PrivateKey{ + Version: version, + N: key.N, + E: key.PublicKey.E, + D: key.D, + P: key.Primes[0], + Q: key.Primes[1], + Dp: key.Precomputed.Dp, + Dq: key.Precomputed.Dq, + Qinv: key.Precomputed.Qinv, + } + + priv.AdditionalPrimes = make([]pkcs1AdditionalRSAPrime, len(key.Precomputed.CRTValues)) + for i, values := range key.Precomputed.CRTValues { + priv.AdditionalPrimes[i].Prime = key.Primes[2+i] + priv.AdditionalPrimes[i].Exp = values.Exp + priv.AdditionalPrimes[i].Coeff = values.Coeff + } + + b, _ := asn1.Marshal(priv) + return b +} + +// ParsePKCS1PublicKey parses an RSA public key in PKCS#1, ASN.1 DER form. +// +// This kind of key is commonly encoded in PEM blocks of type "RSA PUBLIC KEY". +func ParsePKCS1PublicKey(der []byte) (*rsa.PublicKey, error) { + var pub pkcs1PublicKey + rest, err := asn1.Unmarshal(der, &pub) + if err != nil { + if _, err := asn1.Unmarshal(der, &publicKeyInfo{}); err == nil { + return nil, errors.New("x509: failed to parse public key (use ParsePKIXPublicKey instead for this key format)") + } + return nil, err + } + if len(rest) > 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + + if pub.N.Sign() <= 0 || pub.E <= 0 { + return nil, errors.New("x509: public key contains zero or negative value") + } + if pub.E > 1<<31-1 { + return nil, errors.New("x509: public key contains large public exponent") + } + + return &rsa.PublicKey{ + E: pub.E, + N: pub.N, + }, nil +} + +// MarshalPKCS1PublicKey converts an RSA public key to PKCS#1, ASN.1 DER form. +// +// This kind of key is commonly encoded in PEM blocks of type "RSA PUBLIC KEY". +func MarshalPKCS1PublicKey(key *rsa.PublicKey) []byte { + derBytes, _ := asn1.Marshal(pkcs1PublicKey{ + N: key.N, + E: key.E, + }) + return derBytes +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go b/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go new file mode 100644 index 00000000000..a144eb6a5d4 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go @@ -0,0 +1,139 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + + "github.com/google/certificate-transparency-go/asn1" + "github.com/google/certificate-transparency-go/x509/pkix" + + // TODO(robpercival): change this to crypto/ed25519 when Go 1.13 is min version + "golang.org/x/crypto/ed25519" +) + +// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See +// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn +// and RFC 5208. +type pkcs8 struct { + Version int + Algo pkix.AlgorithmIdentifier + PrivateKey []byte + // optional attributes omitted. +} + +// ParsePKCS8PrivateKey parses an unencrypted private key in PKCS#8, ASN.1 DER form. +// +// It returns a *rsa.PrivateKey, a *ecdsa.PrivateKey, or a ed25519.PrivateKey. +// More types might be supported in the future. 
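+//
+// Callers typically type-switch on the result (sketch):
+//
+//	key, err := ParsePKCS8PrivateKey(der)
+//	if err != nil {
+//		return err
+//	}
+//	switch k := key.(type) {
+//	case *rsa.PrivateKey:
+//		// use k
+//	case *ecdsa.PrivateKey:
+//		// use k
+//	case ed25519.PrivateKey:
+//		// use k
+//	}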
+// +// This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY". +func ParsePKCS8PrivateKey(der []byte) (key interface{}, err error) { + var privKey pkcs8 + if _, err := asn1.Unmarshal(der, &privKey); err != nil { + if _, err := asn1.Unmarshal(der, &ecPrivateKey{}); err == nil { + return nil, errors.New("x509: failed to parse private key (use ParseECPrivateKey instead for this key format)") + } + if _, err := asn1.Unmarshal(der, &pkcs1PrivateKey{}); err == nil { + return nil, errors.New("x509: failed to parse private key (use ParsePKCS1PrivateKey instead for this key format)") + } + return nil, err + } + switch { + case privKey.Algo.Algorithm.Equal(OIDPublicKeyRSA): + key, err = ParsePKCS1PrivateKey(privKey.PrivateKey) + if err != nil { + return nil, errors.New("x509: failed to parse RSA private key embedded in PKCS#8: " + err.Error()) + } + return key, nil + + case privKey.Algo.Algorithm.Equal(OIDPublicKeyECDSA): + bytes := privKey.Algo.Parameters.FullBytes + namedCurveOID := new(asn1.ObjectIdentifier) + if _, err := asn1.Unmarshal(bytes, namedCurveOID); err != nil { + namedCurveOID = nil + } + key, err = parseECPrivateKey(namedCurveOID, privKey.PrivateKey) + if err != nil { + return nil, errors.New("x509: failed to parse EC private key embedded in PKCS#8: " + err.Error()) + } + return key, nil + + case privKey.Algo.Algorithm.Equal(OIDPublicKeyEd25519): + if l := len(privKey.Algo.Parameters.FullBytes); l != 0 { + return nil, errors.New("x509: invalid Ed25519 private key parameters") + } + var curvePrivateKey []byte + if _, err := asn1.Unmarshal(privKey.PrivateKey, &curvePrivateKey); err != nil { + return nil, fmt.Errorf("x509: invalid Ed25519 private key: %v", err) + } + if l := len(curvePrivateKey); l != ed25519.SeedSize { + return nil, fmt.Errorf("x509: invalid Ed25519 private key length: %d", l) + } + return ed25519.NewKeyFromSeed(curvePrivateKey), nil + + default: + return nil, fmt.Errorf("x509: PKCS#8 wrapping contained private key with unknown algorithm: %v", privKey.Algo.Algorithm) + } +} + +// MarshalPKCS8PrivateKey converts a private key to PKCS#8, ASN.1 DER form. +// +// The following key types are currently supported: *rsa.PrivateKey, *ecdsa.PrivateKey +// and ed25519.PrivateKey. Unsupported key types result in an error. +// +// This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY". 
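+//
+// Sketch of marshaling and then PEM-encoding (key being any of the
+// supported types):
+//
+//	der, err := MarshalPKCS8PrivateKey(key)
+//	if err != nil {
+//		return err
+//	}
+//	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: der})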
+func MarshalPKCS8PrivateKey(key interface{}) ([]byte, error) { + var privKey pkcs8 + + switch k := key.(type) { + case *rsa.PrivateKey: + privKey.Algo = pkix.AlgorithmIdentifier{ + Algorithm: OIDPublicKeyRSA, + Parameters: asn1.NullRawValue, + } + privKey.PrivateKey = MarshalPKCS1PrivateKey(k) + + case *ecdsa.PrivateKey: + oid, ok := OIDFromNamedCurve(k.Curve) + if !ok { + return nil, errors.New("x509: unknown curve while marshaling to PKCS#8") + } + + oidBytes, err := asn1.Marshal(oid) + if err != nil { + return nil, errors.New("x509: failed to marshal curve OID: " + err.Error()) + } + + privKey.Algo = pkix.AlgorithmIdentifier{ + Algorithm: OIDPublicKeyECDSA, + Parameters: asn1.RawValue{ + FullBytes: oidBytes, + }, + } + + if privKey.PrivateKey, err = marshalECPrivateKeyWithOID(k, nil); err != nil { + return nil, errors.New("x509: failed to marshal EC private key while building PKCS#8: " + err.Error()) + } + + case ed25519.PrivateKey: + privKey.Algo = pkix.AlgorithmIdentifier{ + Algorithm: OIDPublicKeyEd25519, + } + curvePrivateKey, err := asn1.Marshal(k.Seed()) + if err != nil { + return nil, fmt.Errorf("x509: failed to marshal private key: %v", err) + } + privKey.PrivateKey = curvePrivateKey + + default: + return nil, fmt.Errorf("x509: unknown key type while marshaling PKCS#8: %T", key) + } + + return asn1.Marshal(privKey) +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go b/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go new file mode 100644 index 00000000000..1716f908abc --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go @@ -0,0 +1,287 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkix contains shared, low level structures used for ASN.1 parsing +// and serialization of X.509 certificates, CRL and OCSP. +package pkix + +import ( + "encoding/hex" + "fmt" + "math/big" + "time" + + "github.com/google/certificate-transparency-go/asn1" +) + +// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC +// 5280, section 4.1.1.2. +type AlgorithmIdentifier struct { + Raw asn1.RawContent + Algorithm asn1.ObjectIdentifier + Parameters asn1.RawValue `asn1:"optional"` +} + +type RDNSequence []RelativeDistinguishedNameSET + +var attributeTypeNames = map[string]string{ + "2.5.4.6": "C", + "2.5.4.10": "O", + "2.5.4.11": "OU", + "2.5.4.3": "CN", + "2.5.4.5": "SERIALNUMBER", + "2.5.4.7": "L", + "2.5.4.8": "ST", + "2.5.4.9": "STREET", + "2.5.4.17": "POSTALCODE", +} + +// String returns a string representation of the sequence r, +// roughly following the RFC 2253 Distinguished Names syntax. +func (r RDNSequence) String() string { + s := "" + for i := 0; i < len(r); i++ { + rdn := r[len(r)-1-i] + if i > 0 { + s += "," + } + for j, tv := range rdn { + if j > 0 { + s += "+" + } + + oidString := tv.Type.String() + typeName, ok := attributeTypeNames[oidString] + if !ok { + derBytes, err := asn1.Marshal(tv.Value) + if err == nil { + s += oidString + "=#" + hex.EncodeToString(derBytes) + continue // No value escaping necessary. 
+ } + + typeName = oidString + } + + valueString := fmt.Sprint(tv.Value) + escaped := make([]rune, 0, len(valueString)) + + for k, c := range valueString { + escape := false + + switch c { + case ',', '+', '"', '\\', '<', '>', ';': + escape = true + + case ' ': + escape = k == 0 || k == len(valueString)-1 + + case '#': + escape = k == 0 + } + + if escape { + escaped = append(escaped, '\\', c) + } else { + escaped = append(escaped, c) + } + } + + s += typeName + "=" + string(escaped) + } + } + + return s +} + +type RelativeDistinguishedNameSET []AttributeTypeAndValue + +// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in +// RFC 5280, Section 4.1.2.4. +type AttributeTypeAndValue struct { + Type asn1.ObjectIdentifier + Value interface{} +} + +// AttributeTypeAndValueSET represents a set of ASN.1 sequences of +// AttributeTypeAndValue sequences from RFC 2986 (PKCS #10). +type AttributeTypeAndValueSET struct { + Type asn1.ObjectIdentifier + Value [][]AttributeTypeAndValue `asn1:"set"` +} + +// Extension represents the ASN.1 structure of the same name. See RFC +// 5280, section 4.2. +type Extension struct { + Id asn1.ObjectIdentifier + Critical bool `asn1:"optional"` + Value []byte +} + +// Name represents an X.509 distinguished name. This only includes the common +// elements of a DN. When parsing, all elements are stored in Names and +// non-standard elements can be extracted from there. When marshaling, elements +// in ExtraNames are appended and override other values with the same OID. +type Name struct { + Country, Organization, OrganizationalUnit []string + Locality, Province []string + StreetAddress, PostalCode []string + SerialNumber, CommonName string + + Names []AttributeTypeAndValue + ExtraNames []AttributeTypeAndValue +} + +func (n *Name) FillFromRDNSequence(rdns *RDNSequence) { + for _, rdn := range *rdns { + if len(rdn) == 0 { + continue + } + + for _, atv := range rdn { + n.Names = append(n.Names, atv) + value, ok := atv.Value.(string) + if !ok { + continue + } + + t := atv.Type + if len(t) == 4 && t[0] == OIDAttribute[0] && t[1] == OIDAttribute[1] && t[2] == OIDAttribute[2] { + switch t[3] { + case OIDCommonName[3]: + n.CommonName = value + case OIDSerialNumber[3]: + n.SerialNumber = value + case OIDCountry[3]: + n.Country = append(n.Country, value) + case OIDLocality[3]: + n.Locality = append(n.Locality, value) + case OIDProvince[3]: + n.Province = append(n.Province, value) + case OIDStreetAddress[3]: + n.StreetAddress = append(n.StreetAddress, value) + case OIDOrganization[3]: + n.Organization = append(n.Organization, value) + case OIDOrganizationalUnit[3]: + n.OrganizationalUnit = append(n.OrganizationalUnit, value) + case OIDPostalCode[3]: + n.PostalCode = append(n.PostalCode, value) + } + } + } + } +} + +var ( + OIDAttribute = asn1.ObjectIdentifier{2, 5, 4} + OIDCountry = asn1.ObjectIdentifier{2, 5, 4, 6} + OIDOrganization = asn1.ObjectIdentifier{2, 5, 4, 10} + OIDOrganizationalUnit = asn1.ObjectIdentifier{2, 5, 4, 11} + OIDCommonName = asn1.ObjectIdentifier{2, 5, 4, 3} + OIDSerialNumber = asn1.ObjectIdentifier{2, 5, 4, 5} + OIDLocality = asn1.ObjectIdentifier{2, 5, 4, 7} + OIDProvince = asn1.ObjectIdentifier{2, 5, 4, 8} + OIDStreetAddress = asn1.ObjectIdentifier{2, 5, 4, 9} + OIDPostalCode = asn1.ObjectIdentifier{2, 5, 4, 17} + + OIDPseudonym = asn1.ObjectIdentifier{2, 5, 4, 65} + OIDTitle = asn1.ObjectIdentifier{2, 5, 4, 12} + OIDDnQualifier = asn1.ObjectIdentifier{2, 5, 4, 46} + OIDName = asn1.ObjectIdentifier{2, 5, 4, 41} + OIDSurname = 
asn1.ObjectIdentifier{2, 5, 4, 4} + OIDGivenName = asn1.ObjectIdentifier{2, 5, 4, 42} + OIDInitials = asn1.ObjectIdentifier{2, 5, 4, 43} + OIDGenerationQualifier = asn1.ObjectIdentifier{2, 5, 4, 44} +) + +// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence +// and returns the new value. The relativeDistinguishedNameSET contains an +// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and +// search for AttributeTypeAndValue. +func (n Name) appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence { + if len(values) == 0 || oidInAttributeTypeAndValue(oid, n.ExtraNames) { + return in + } + + s := make([]AttributeTypeAndValue, len(values)) + for i, value := range values { + s[i].Type = oid + s[i].Value = value + } + + return append(in, s) +} + +func (n Name) ToRDNSequence() (ret RDNSequence) { + ret = n.appendRDNs(ret, n.Country, OIDCountry) + ret = n.appendRDNs(ret, n.Province, OIDProvince) + ret = n.appendRDNs(ret, n.Locality, OIDLocality) + ret = n.appendRDNs(ret, n.StreetAddress, OIDStreetAddress) + ret = n.appendRDNs(ret, n.PostalCode, OIDPostalCode) + ret = n.appendRDNs(ret, n.Organization, OIDOrganization) + ret = n.appendRDNs(ret, n.OrganizationalUnit, OIDOrganizationalUnit) + if len(n.CommonName) > 0 { + ret = n.appendRDNs(ret, []string{n.CommonName}, OIDCommonName) + } + if len(n.SerialNumber) > 0 { + ret = n.appendRDNs(ret, []string{n.SerialNumber}, OIDSerialNumber) + } + for _, atv := range n.ExtraNames { + ret = append(ret, []AttributeTypeAndValue{atv}) + } + + return ret +} + +// String returns the string form of n, roughly following +// the RFC 2253 Distinguished Names syntax. +func (n Name) String() string { + return n.ToRDNSequence().String() +} + +// oidInAttributeTypeAndValue reports whether a type with the given OID exists +// in atv. +func oidInAttributeTypeAndValue(oid asn1.ObjectIdentifier, atv []AttributeTypeAndValue) bool { + for _, a := range atv { + if a.Type.Equal(oid) { + return true + } + } + return false +} + +// CertificateList represents the ASN.1 structure of the same name. See RFC +// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the +// signature. +type CertificateList struct { + TBSCertList TBSCertificateList + SignatureAlgorithm AlgorithmIdentifier + SignatureValue asn1.BitString +} + +// HasExpired reports whether certList should have been updated by now. +func (certList *CertificateList) HasExpired(now time.Time) bool { + return !now.Before(certList.TBSCertList.NextUpdate) +} + +// TBSCertificateList represents the ASN.1 structure TBSCertList. See RFC +// 5280, section 5.1. +type TBSCertificateList struct { + Raw asn1.RawContent + Version int `asn1:"optional,default:0"` + Signature AlgorithmIdentifier + Issuer RDNSequence + ThisUpdate time.Time + NextUpdate time.Time `asn1:"optional"` + RevokedCertificates []RevokedCertificate `asn1:"optional"` + Extensions []Extension `asn1:"tag:0,optional,explicit"` +} + +// RevokedCertificate represents the unnamed ASN.1 structure that makes up the +// revokedCertificates member of the TBSCertList structure. See RFC +// 5280, section 5.1. 
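+//
+// In the RFC 5280 ASN.1 it is the inline SEQUENCE:
+//
+//	revokedCertificates SEQUENCE OF SEQUENCE {
+//	    userCertificate    CertificateSerialNumber,
+//	    revocationDate     Time,
+//	    crlEntryExtensions Extensions OPTIONAL }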
+type RevokedCertificate struct { + SerialNumber *big.Int + RevocationTime time.Time + Extensions []Extension `asn1:"optional"` +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go b/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go new file mode 100644 index 00000000000..06fd439c1fb --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.11 +// +build go1.11 + +package x509 + +import ( + "syscall" + "unsafe" +) + +// For Go versions >= 1.11, the ExtraPolicyPara field in +// syscall.CertChainPolicyPara is of type syscall.Pointer. See: +// https://github.com/golang/go/commit/4869ec00e87ef + +func convertToPolicyParaType(p unsafe.Pointer) syscall.Pointer { + return (syscall.Pointer)(p) +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go b/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go new file mode 100644 index 00000000000..f13a47adfbe --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go @@ -0,0 +1,18 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.11 +// +build !go1.11 + +package x509 + +import "unsafe" + +// For Go versions before 1.11, the ExtraPolicyPara field in +// syscall.CertChainPolicyPara was of type uintptr. See: +// https://github.com/golang/go/commit/4869ec00e87ef + +func convertToPolicyParaType(p unsafe.Pointer) uintptr { + return uintptr(p) +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/revoked.go b/vendor/github.com/google/certificate-transparency-go/x509/revoked.go new file mode 100644 index 00000000000..e5fa6dd15f5 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/revoked.go @@ -0,0 +1,365 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "bytes" + "encoding/pem" + "time" + + "github.com/google/certificate-transparency-go/asn1" + "github.com/google/certificate-transparency-go/x509/pkix" +) + +// OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2. +var ( + OIDExtensionCRLNumber = asn1.ObjectIdentifier{2, 5, 29, 20} + OIDExtensionDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27} + OIDExtensionIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28} +) + +// OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3 +var ( + OIDExtensionCRLReasons = asn1.ObjectIdentifier{2, 5, 29, 21} + OIDExtensionInvalidityDate = asn1.ObjectIdentifier{2, 5, 29, 24} + OIDExtensionCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29} +) + +// RevocationReasonCode represents the reason for a certificate revocation; see RFC 5280 s5.3.1. +type RevocationReasonCode asn1.Enumerated + +// RevocationReasonCode values. 
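+// Note the gap in the sequence: RFC 5280 s5.3.1 leaves reason code 7
+// unused, so the values jump from CertificateHold(6) to RemoveFromCRL(8).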
+var ( + Unspecified = RevocationReasonCode(0) + KeyCompromise = RevocationReasonCode(1) + CACompromise = RevocationReasonCode(2) + AffiliationChanged = RevocationReasonCode(3) + Superseded = RevocationReasonCode(4) + CessationOfOperation = RevocationReasonCode(5) + CertificateHold = RevocationReasonCode(6) + RemoveFromCRL = RevocationReasonCode(8) + PrivilegeWithdrawn = RevocationReasonCode(9) + AACompromise = RevocationReasonCode(10) +) + +// ReasonFlag holds a bitmask of applicable revocation reasons, from RFC 5280 s4.2.1.13 +type ReasonFlag int + +// ReasonFlag values. +const ( + UnusedFlag ReasonFlag = 1 << iota + KeyCompromiseFlag + CACompromiseFlag + AffiliationChangedFlag + SupersededFlag + CessationOfOperationFlag + CertificateHoldFlag + PrivilegeWithdrawnFlag + AACompromiseFlag +) + +// CertificateList represents the ASN.1 structure of the same name from RFC 5280, s5.1. +// It has the same content as pkix.CertificateList, but the contents include parsed versions +// of any extensions. +type CertificateList struct { + Raw asn1.RawContent + TBSCertList TBSCertList + SignatureAlgorithm pkix.AlgorithmIdentifier + SignatureValue asn1.BitString +} + +// ExpiredAt reports whether now is past the expiry time of certList. +func (certList *CertificateList) ExpiredAt(now time.Time) bool { + return now.After(certList.TBSCertList.NextUpdate) +} + +// Indication of whether extensions need to be critical or non-critical. Extensions that +// can be either are omitted from the map. +var listExtCritical = map[string]bool{ + // From RFC 5280... + OIDExtensionAuthorityKeyId.String(): false, // s5.2.1 + OIDExtensionIssuerAltName.String(): false, // s5.2.2 + OIDExtensionCRLNumber.String(): false, // s5.2.3 + OIDExtensionDeltaCRLIndicator.String(): true, // s5.2.4 + OIDExtensionIssuingDistributionPoint.String(): true, // s5.2.5 + OIDExtensionFreshestCRL.String(): false, // s5.2.6 + OIDExtensionAuthorityInfoAccess.String(): false, // s5.2.7 +} + +var certExtCritical = map[string]bool{ + // From RFC 5280... + OIDExtensionCRLReasons.String(): false, // s5.3.1 + OIDExtensionInvalidityDate.String(): false, // s5.3.2 + OIDExtensionCertificateIssuer.String(): true, // s5.3.3 +} + +// IssuingDistributionPoint represents the ASN.1 structure of the same +// name +type IssuingDistributionPoint struct { + DistributionPoint distributionPointName `asn1:"optional,tag:0"` + OnlyContainsUserCerts bool `asn1:"optional,tag:1"` + OnlyContainsCACerts bool `asn1:"optional,tag:2"` + OnlySomeReasons asn1.BitString `asn1:"optional,tag:3"` + IndirectCRL bool `asn1:"optional,tag:4"` + OnlyContainsAttributeCerts bool `asn1:"optional,tag:5"` +} + +// TBSCertList represents the ASN.1 structure of the same name from RFC +// 5280, section 5.1. It has the same content as pkix.TBSCertificateList +// but the extensions are included in a parsed format. +type TBSCertList struct { + Raw asn1.RawContent + Version int + Signature pkix.AlgorithmIdentifier + Issuer pkix.RDNSequence + ThisUpdate time.Time + NextUpdate time.Time + RevokedCertificates []*RevokedCertificate + Extensions []pkix.Extension + // Cracked out extensions: + AuthorityKeyID []byte + IssuerAltNames GeneralNames + CRLNumber int + BaseCRLNumber int // -1 if no delta CRL present + IssuingDistributionPoint IssuingDistributionPoint + IssuingDPFullNames GeneralNames + FreshestCRLDistributionPoint []string + OCSPServer []string + IssuingCertificateURL []string +} + +// ParseCertificateList parses a CertificateList (e.g. a CRL) from the given +// bytes. 
It's often the case that PEM encoded CRLs will appear where they +// should be DER encoded, so this function will transparently handle PEM +// encoding as long as there isn't any leading garbage. +func ParseCertificateList(clBytes []byte) (*CertificateList, error) { + if bytes.HasPrefix(clBytes, pemCRLPrefix) { + block, _ := pem.Decode(clBytes) + if block != nil && block.Type == pemType { + clBytes = block.Bytes + } + } + return ParseCertificateListDER(clBytes) +} + +// ParseCertificateListDER parses a DER encoded CertificateList from the given bytes. +// For non-fatal errors, this function returns both an error and a CertificateList +// object. +func ParseCertificateListDER(derBytes []byte) (*CertificateList, error) { + var errs Errors + // First parse the DER into the pkix structures. + pkixList := new(pkix.CertificateList) + if rest, err := asn1.Unmarshal(derBytes, pkixList); err != nil { + errs.AddID(ErrInvalidCertList, err) + return nil, &errs + } else if len(rest) != 0 { + errs.AddID(ErrTrailingCertList) + return nil, &errs + } + + // Transcribe the revoked certs but crack out extensions. + revokedCerts := make([]*RevokedCertificate, len(pkixList.TBSCertList.RevokedCertificates)) + for i, pkixRevoked := range pkixList.TBSCertList.RevokedCertificates { + revokedCerts[i] = parseRevokedCertificate(pkixRevoked, &errs) + if revokedCerts[i] == nil { + return nil, &errs + } + } + + certList := CertificateList{ + Raw: derBytes, + TBSCertList: TBSCertList{ + Raw: pkixList.TBSCertList.Raw, + Version: pkixList.TBSCertList.Version, + Signature: pkixList.TBSCertList.Signature, + Issuer: pkixList.TBSCertList.Issuer, + ThisUpdate: pkixList.TBSCertList.ThisUpdate, + NextUpdate: pkixList.TBSCertList.NextUpdate, + RevokedCertificates: revokedCerts, + Extensions: pkixList.TBSCertList.Extensions, + CRLNumber: -1, + BaseCRLNumber: -1, + }, + SignatureAlgorithm: pkixList.SignatureAlgorithm, + SignatureValue: pkixList.SignatureValue, + } + + // Now crack out extensions. 
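+ // For each extension, the loop below first checks that its criticality
+ // matches the RFC 5280 expectation recorded in listExtCritical, then
+ // unmarshals recognized extensions into the cracked-out TBSCertList
+ // fields; unrecognized critical extensions are recorded as errors.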
+ for _, e := range certList.TBSCertList.Extensions { + if expectCritical, present := listExtCritical[e.Id.String()]; present { + if e.Critical && !expectCritical { + errs.AddID(ErrUnexpectedlyCriticalCertListExtension, e.Id) + } else if !e.Critical && expectCritical { + errs.AddID(ErrUnexpectedlyNonCriticalCertListExtension, e.Id) + } + } + switch { + case e.Id.Equal(OIDExtensionAuthorityKeyId): + // RFC 5280 s5.2.1 + var a authKeyId + if rest, err := asn1.Unmarshal(e.Value, &a); err != nil { + errs.AddID(ErrInvalidCertListAuthKeyID, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingCertListAuthKeyID) + } + certList.TBSCertList.AuthorityKeyID = a.Id + case e.Id.Equal(OIDExtensionIssuerAltName): + // RFC 5280 s5.2.2 + if err := parseGeneralNames(e.Value, &certList.TBSCertList.IssuerAltNames); err != nil { + errs.AddID(ErrInvalidCertListIssuerAltName, err) + } + case e.Id.Equal(OIDExtensionCRLNumber): + // RFC 5280 s5.2.3 + if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.CRLNumber); err != nil { + errs.AddID(ErrInvalidCertListCRLNumber, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingCertListCRLNumber) + } + if certList.TBSCertList.CRLNumber < 0 { + errs.AddID(ErrNegativeCertListCRLNumber, certList.TBSCertList.CRLNumber) + } + case e.Id.Equal(OIDExtensionDeltaCRLIndicator): + // RFC 5280 s5.2.4 + if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.BaseCRLNumber); err != nil { + errs.AddID(ErrInvalidCertListDeltaCRL, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingCertListDeltaCRL) + } + if certList.TBSCertList.BaseCRLNumber < 0 { + errs.AddID(ErrNegativeCertListDeltaCRL, certList.TBSCertList.BaseCRLNumber) + } + case e.Id.Equal(OIDExtensionIssuingDistributionPoint): + parseIssuingDistributionPoint(e.Value, &certList.TBSCertList.IssuingDistributionPoint, &certList.TBSCertList.IssuingDPFullNames, &errs) + case e.Id.Equal(OIDExtensionFreshestCRL): + // RFC 5280 s5.2.6 + if err := parseDistributionPoints(e.Value, &certList.TBSCertList.FreshestCRLDistributionPoint); err != nil { + errs.AddID(ErrInvalidCertListFreshestCRL, err) + return nil, err + } + case e.Id.Equal(OIDExtensionAuthorityInfoAccess): + // RFC 5280 s5.2.7 + var aia []accessDescription + if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil { + errs.AddID(ErrInvalidCertListAuthInfoAccess, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingCertListAuthInfoAccess) + } + + for _, v := range aia { + // GeneralName: uniformResourceIdentifier [6] IA5String + if v.Location.Tag != tagURI { + continue + } + switch { + case v.Method.Equal(OIDAuthorityInfoAccessOCSP): + certList.TBSCertList.OCSPServer = append(certList.TBSCertList.OCSPServer, string(v.Location.Bytes)) + case v.Method.Equal(OIDAuthorityInfoAccessIssuers): + certList.TBSCertList.IssuingCertificateURL = append(certList.TBSCertList.IssuingCertificateURL, string(v.Location.Bytes)) + } + // TODO(drysdale): cope with more possibilities + } + default: + if e.Critical { + errs.AddID(ErrUnhandledCriticalCertListExtension, e.Id) + } + } + } + + if errs.Fatal() { + return nil, &errs + } + if errs.Empty() { + return &certList, nil + } + return &certList, &errs +} + +func parseIssuingDistributionPoint(data []byte, idp *IssuingDistributionPoint, name *GeneralNames, errs *Errors) { + // RFC 5280 s5.2.5 + if rest, err := asn1.Unmarshal(data, idp); err != nil { + errs.AddID(ErrInvalidCertListIssuingDP, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingCertListIssuingDP) + } + + typeCount := 0 + if idp.OnlyContainsUserCerts 
{ + typeCount++ + } + if idp.OnlyContainsCACerts { + typeCount++ + } + if idp.OnlyContainsAttributeCerts { + typeCount++ + } + if typeCount > 1 { + errs.AddID(ErrCertListIssuingDPMultipleTypes, idp.OnlyContainsUserCerts, idp.OnlyContainsCACerts, idp.OnlyContainsAttributeCerts) + } + for _, fn := range idp.DistributionPoint.FullName { + if _, err := parseGeneralName(fn.FullBytes, name, false); err != nil { + errs.AddID(ErrCertListIssuingDPInvalidFullName, err) + } + } +} + +// RevokedCertificate represents the unnamed ASN.1 structure that makes up the +// revokedCertificates member of the TBSCertList structure from RFC 5280, s5.1. +// It has the same content as pkix.RevokedCertificate but the extensions are +// included in a parsed format. +type RevokedCertificate struct { + pkix.RevokedCertificate + // Cracked out extensions: + RevocationReason RevocationReasonCode + InvalidityDate time.Time + Issuer GeneralNames +} + +func parseRevokedCertificate(pkixRevoked pkix.RevokedCertificate, errs *Errors) *RevokedCertificate { + result := RevokedCertificate{RevokedCertificate: pkixRevoked} + for _, e := range pkixRevoked.Extensions { + if expectCritical, present := certExtCritical[e.Id.String()]; present { + if e.Critical && !expectCritical { + errs.AddID(ErrUnexpectedlyCriticalRevokedCertExtension, e.Id) + } else if !e.Critical && expectCritical { + errs.AddID(ErrUnexpectedlyNonCriticalRevokedCertExtension, e.Id) + } + } + switch { + case e.Id.Equal(OIDExtensionCRLReasons): + // RFC 5280, s5.3.1 + var reason asn1.Enumerated + if rest, err := asn1.Unmarshal(e.Value, &reason); err != nil { + errs.AddID(ErrInvalidRevocationReason, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingRevocationReason) + } + result.RevocationReason = RevocationReasonCode(reason) + case e.Id.Equal(OIDExtensionInvalidityDate): + // RFC 5280, s5.3.2 + if rest, err := asn1.Unmarshal(e.Value, &result.InvalidityDate); err != nil { + errs.AddID(ErrInvalidRevocationInvalidityDate, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingRevocationInvalidityDate) + } + case e.Id.Equal(OIDExtensionCertificateIssuer): + // RFC 5280, s5.3.3 + if err := parseGeneralNames(e.Value, &result.Issuer); err != nil { + errs.AddID(ErrInvalidRevocationIssuer, err) + } + default: + if e.Critical { + errs.AddID(ErrUnhandledCriticalRevokedCertExtension, e.Id) + } + } + } + return &result +} + +// CheckCertificateListSignature checks that the signature in crl is from c. +func (c *Certificate) CheckCertificateListSignature(crl *CertificateList) error { + algo := SignatureAlgorithmFromAI(crl.SignatureAlgorithm) + return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign()) +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root.go b/vendor/github.com/google/certificate-transparency-go/x509/root.go new file mode 100644 index 00000000000..240296247df --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root.go @@ -0,0 +1,25 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
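+// A minimal sketch of the lazy-initialization pattern this file uses
+// (hypothetical names, not part of the upstream file):
+//
+//	var once sync.Once
+//	var pool *CertPool
+//
+//	func get() *CertPool {
+//		once.Do(func() { pool, _ = load() })
+//		return pool
+//	}
+//
+// The loader runs at most once per process, and every later caller
+// observes the same result.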
+ +package x509 + +import "sync" + +var ( + once sync.Once + systemRoots *CertPool + systemRootsErr error +) + +func systemRootsPool() *CertPool { + once.Do(initSystemRoots) + return systemRoots +} + +func initSystemRoots() { + systemRoots, systemRootsErr = loadSystemRoots() + if systemRootsErr != nil { + systemRoots = nil + } +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_aix.go b/vendor/github.com/google/certificate-transparency-go/x509/root_aix.go new file mode 100644 index 00000000000..6d427739a43 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_aix.go @@ -0,0 +1,10 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/var/ssl/certs/ca-bundle.crt", +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go b/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go new file mode 100644 index 00000000000..8c04bdcdfac --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build dragonfly || freebsd || netbsd || openbsd +// +build dragonfly freebsd netbsd openbsd + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/usr/local/etc/ssl/cert.pem", // FreeBSD + "/etc/ssl/cert.pem", // OpenBSD + "/usr/local/share/certs/ca-root-nss.crt", // DragonFly + "/etc/openssl/certs/ca-certificates.crt", // NetBSD +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go b/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go new file mode 100644 index 00000000000..dba99bb8dc1 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go @@ -0,0 +1,315 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo && !arm && !arm64 && !ios +// +build cgo,!arm,!arm64,!ios + +package x509 + +/* +#cgo CFLAGS: -mmacosx-version-min=10.10 -D__MAC_OS_X_VERSION_MAX_ALLOWED=101300 +#cgo LDFLAGS: -framework CoreFoundation -framework Security + +#include <errno.h> +#include <sys/sysctl.h> + +#include <CoreFoundation/CoreFoundation.h> +#include <Security/Security.h> + +static Boolean isSSLPolicy(SecPolicyRef policyRef) { + if (!policyRef) { + return false; + } + CFDictionaryRef properties = SecPolicyCopyProperties(policyRef); + if (properties == NULL) { + return false; + } + Boolean isSSL = false; + CFTypeRef value = NULL; + if (CFDictionaryGetValueIfPresent(properties, kSecPolicyOid, (const void **)&value)) { + isSSL = CFEqual(value, kSecPolicyAppleSSL); + } + CFRelease(properties); + return isSSL; +} + +// sslTrustSettingsResult obtains the final kSecTrustSettingsResult value +// for a certificate in the user or admin domain, combining usage constraints +// for the SSL SecTrustSettingsPolicy, ignoring SecTrustSettingsKeyUsage and +// kSecTrustSettingsAllowedError.
+// https://developer.apple.com/documentation/security/1400261-sectrustsettingscopytrustsetting +static SInt32 sslTrustSettingsResult(SecCertificateRef cert) { + CFArrayRef trustSettings = NULL; + OSStatus err = SecTrustSettingsCopyTrustSettings(cert, kSecTrustSettingsDomainUser, &trustSettings); + + // According to Apple's SecTrustServer.c, "user trust settings overrule admin trust settings", + // but the rules of the override are unclear. Let's assume admin trust settings are applicable + // if and only if user trust settings fail to load or are NULL. + if (err != errSecSuccess || trustSettings == NULL) { + if (trustSettings != NULL) CFRelease(trustSettings); + err = SecTrustSettingsCopyTrustSettings(cert, kSecTrustSettingsDomainAdmin, &trustSettings); + } + + // > no trust settings [...] means "this certificate must be verified to a known trusted certificate” + // (Should this cause a fallback from user to admin domain? It's unclear.) + if (err != errSecSuccess || trustSettings == NULL) { + if (trustSettings != NULL) CFRelease(trustSettings); + return kSecTrustSettingsResultUnspecified; + } + + // > An empty trust settings array means "always trust this certificate” with an + // > overall trust setting for the certificate of kSecTrustSettingsResultTrustRoot. + if (CFArrayGetCount(trustSettings) == 0) { + CFRelease(trustSettings); + return kSecTrustSettingsResultTrustRoot; + } + + // kSecTrustSettingsResult is defined as CFSTR("kSecTrustSettingsResult"), + // but the Go linker's internal linking mode can't handle CFSTR relocations. + // Create our own dynamic string instead and release it below. + CFStringRef _kSecTrustSettingsResult = CFStringCreateWithCString( + NULL, "kSecTrustSettingsResult", kCFStringEncodingUTF8); + CFStringRef _kSecTrustSettingsPolicy = CFStringCreateWithCString( + NULL, "kSecTrustSettingsPolicy", kCFStringEncodingUTF8); + CFStringRef _kSecTrustSettingsPolicyString = CFStringCreateWithCString( + NULL, "kSecTrustSettingsPolicyString", kCFStringEncodingUTF8); + + CFIndex m; SInt32 result = 0; + for (m = 0; m < CFArrayGetCount(trustSettings); m++) { + CFDictionaryRef tSetting = (CFDictionaryRef)CFArrayGetValueAtIndex(trustSettings, m); + + // First, check if this trust setting is constrained to a non-SSL policy. + SecPolicyRef policyRef; + if (CFDictionaryGetValueIfPresent(tSetting, _kSecTrustSettingsPolicy, (const void**)&policyRef)) { + if (!isSSLPolicy(policyRef)) { + continue; + } + } + + if (CFDictionaryContainsKey(tSetting, _kSecTrustSettingsPolicyString)) { + // Restricted to a hostname, not a root. + continue; + } + + CFNumberRef cfNum; + if (CFDictionaryGetValueIfPresent(tSetting, _kSecTrustSettingsResult, (const void**)&cfNum)) { + CFNumberGetValue(cfNum, kCFNumberSInt32Type, &result); + } else { + // > If this key is not present, a default value of + // > kSecTrustSettingsResultTrustRoot is assumed. + result = kSecTrustSettingsResultTrustRoot; + } + + // If multiple dictionaries match, we are supposed to "OR" them, + // the semantics of which are not clear. Since TrustRoot and TrustAsRoot + // are mutually exclusive, Deny should probably override, and Invalid and + // Unspecified be overridden, approximate this by stopping at the first + // TrustRoot, TrustAsRoot or Deny. + if (result == kSecTrustSettingsResultTrustRoot) { + break; + } else if (result == kSecTrustSettingsResultTrustAsRoot) { + break; + } else if (result == kSecTrustSettingsResultDeny) { + break; + } + } + + // If trust settings are present, but none of them match the policy... 
+ // the docs don't tell us what to do. + // + // "Trust settings for a given use apply if any of the dictionaries in the + // certificate’s trust settings array satisfies the specified use." suggests + // that it's as if there were no trust settings at all, so we should probably + // fallback to the admin trust settings. TODO. + if (result == 0) { + result = kSecTrustSettingsResultUnspecified; + } + + CFRelease(_kSecTrustSettingsPolicy); + CFRelease(_kSecTrustSettingsPolicyString); + CFRelease(_kSecTrustSettingsResult); + CFRelease(trustSettings); + + return result; +} + +// isRootCertificate reports whether Subject and Issuer match. +static Boolean isRootCertificate(SecCertificateRef cert, CFErrorRef *errRef) { + CFDataRef subjectName = SecCertificateCopyNormalizedSubjectContent(cert, errRef); + if (*errRef != NULL) { + return false; + } + CFDataRef issuerName = SecCertificateCopyNormalizedIssuerContent(cert, errRef); + if (*errRef != NULL) { + CFRelease(subjectName); + return false; + } + Boolean equal = CFEqual(subjectName, issuerName); + CFRelease(subjectName); + CFRelease(issuerName); + return equal; +} + +// CopyPEMRootsCTX509 fetches the system's list of trusted X.509 root certificates +// for the kSecTrustSettingsPolicy SSL. +// +// On success it returns 0 and fills pemRoots with a CFDataRef that contains the extracted root +// certificates of the system. On failure, the function returns -1. +// Additionally, it fills untrustedPemRoots with certs that must be removed from pemRoots. +// +// Note: The CFDataRef returned in pemRoots and untrustedPemRoots must +// be released (using CFRelease) after we've consumed its content. +static int CopyPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots, bool debugDarwinRoots) { + int i; + + if (debugDarwinRoots) { + fprintf(stderr, "crypto/x509: kSecTrustSettingsResultInvalid = %d\n", kSecTrustSettingsResultInvalid); + fprintf(stderr, "crypto/x509: kSecTrustSettingsResultTrustRoot = %d\n", kSecTrustSettingsResultTrustRoot); + fprintf(stderr, "crypto/x509: kSecTrustSettingsResultTrustAsRoot = %d\n", kSecTrustSettingsResultTrustAsRoot); + fprintf(stderr, "crypto/x509: kSecTrustSettingsResultDeny = %d\n", kSecTrustSettingsResultDeny); + fprintf(stderr, "crypto/x509: kSecTrustSettingsResultUnspecified = %d\n", kSecTrustSettingsResultUnspecified); + } + + // Get certificates from all domains, not just System, this lets + // the user add CAs to their "login" keychain, and Admins to add + // to the "System" keychain + SecTrustSettingsDomain domains[] = { kSecTrustSettingsDomainSystem, + kSecTrustSettingsDomainAdmin, kSecTrustSettingsDomainUser }; + + int numDomains = sizeof(domains)/sizeof(SecTrustSettingsDomain); + if (pemRoots == NULL || untrustedPemRoots == NULL) { + return -1; + } + + CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0); + CFMutableDataRef combinedUntrustedData = CFDataCreateMutable(kCFAllocatorDefault, 0); + for (i = 0; i < numDomains; i++) { + int j; + CFArrayRef certs = NULL; + OSStatus err = SecTrustSettingsCopyCertificates(domains[i], &certs); + if (err != noErr) { + continue; + } + + CFIndex numCerts = CFArrayGetCount(certs); + for (j = 0; j < numCerts; j++) { + SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, j); + if (cert == NULL) { + continue; + } + + SInt32 result; + if (domains[i] == kSecTrustSettingsDomainSystem) { + // Certs found in the system domain are always trusted. 
If the user + // configures "Never Trust" on such a cert, it will also be found in the + // admin or user domain, causing it to be added to untrustedPemRoots. The + // Go code will then clean this up. + result = kSecTrustSettingsResultTrustRoot; + } else { + result = sslTrustSettingsResult(cert); + if (debugDarwinRoots) { + CFErrorRef errRef = NULL; + CFStringRef summary = SecCertificateCopyShortDescription(NULL, cert, &errRef); + if (errRef != NULL) { + fprintf(stderr, "crypto/x509: SecCertificateCopyShortDescription failed\n"); + CFRelease(errRef); + continue; + } + + CFIndex length = CFStringGetLength(summary); + CFIndex maxSize = CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1; + char *buffer = malloc(maxSize); + if (CFStringGetCString(summary, buffer, maxSize, kCFStringEncodingUTF8)) { + fprintf(stderr, "crypto/x509: %s returned %d\n", buffer, (int)result); + } + free(buffer); + CFRelease(summary); + } + } + + CFMutableDataRef appendTo; + // > Note the distinction between the results kSecTrustSettingsResultTrustRoot + // > and kSecTrustSettingsResultTrustAsRoot: The former can only be applied to + // > root (self-signed) certificates; the latter can only be applied to + // > non-root certificates. + if (result == kSecTrustSettingsResultTrustRoot) { + CFErrorRef errRef = NULL; + if (!isRootCertificate(cert, &errRef) || errRef != NULL) { + if (errRef != NULL) CFRelease(errRef); + continue; + } + + appendTo = combinedData; + } else if (result == kSecTrustSettingsResultTrustAsRoot) { + CFErrorRef errRef = NULL; + if (isRootCertificate(cert, &errRef) || errRef != NULL) { + if (errRef != NULL) CFRelease(errRef); + continue; + } + + appendTo = combinedData; + } else if (result == kSecTrustSettingsResultDeny) { + appendTo = combinedUntrustedData; + } else if (result == kSecTrustSettingsResultUnspecified) { + // Certificates with unspecified trust should probably be added to a pool of + // intermediates for chain building, or checked for transitive trust and + // added to the root pool (which is an imprecise approximation because it + // cuts chains short) but we don't support either at the moment. TODO. 
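+ // As written, such certs are skipped outright: they are appended to
+ // neither combinedData nor combinedUntrustedData.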
+ continue; + } else { + continue; + } + + CFDataRef data = NULL; + err = SecItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data); + if (err != noErr) { + continue; + } + if (data != NULL) { + CFDataAppendBytes(appendTo, CFDataGetBytePtr(data), CFDataGetLength(data)); + CFRelease(data); + } + } + CFRelease(certs); + } + *pemRoots = combinedData; + *untrustedPemRoots = combinedUntrustedData; + return 0; +} +*/ +import "C" +import ( + "errors" + "unsafe" +) + +func loadSystemRoots() (*CertPool, error) { + var data, untrustedData C.CFDataRef + err := C.CopyPEMRootsCTX509(&data, &untrustedData, C.bool(debugDarwinRoots)) + if err == -1 { + return nil, errors.New("crypto/x509: failed to load darwin system roots with cgo") + } + defer C.CFRelease(C.CFTypeRef(data)) + defer C.CFRelease(C.CFTypeRef(untrustedData)) + + buf := C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(data)), C.int(C.CFDataGetLength(data))) + roots := NewCertPool() + roots.AppendCertsFromPEM(buf) + + if C.CFDataGetLength(untrustedData) == 0 { + return roots, nil + } + + buf = C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(untrustedData)), C.int(C.CFDataGetLength(untrustedData))) + untrustedRoots := NewCertPool() + untrustedRoots.AppendCertsFromPEM(buf) + + trustedRoots := NewCertPool() + for _, c := range roots.certs { + if !untrustedRoots.contains(c) { + trustedRoots.AddCert(c) + } + } + return trustedRoots, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go new file mode 100644 index 00000000000..4330ae97a44 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go @@ -0,0 +1,288 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run root_darwin_arm_gen.go -output root_darwin_armx.go + +package x509 + +import ( + "bufio" + "bytes" + "crypto/sha1" + "encoding/pem" + "fmt" + "io" + "os" + "os/exec" + "os/user" + "path/filepath" + "strings" + "sync" +) + +var debugDarwinRoots = strings.Contains(os.Getenv("GODEBUG"), "x509roots=1") + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} + +// This code is only used when compiling without cgo. +// It is here, instead of root_nocgo_darwin.go, so that tests can check it +// even if the tests are run with cgo enabled. +// The linker will not include these unused functions in binaries built with cgo enabled. + +// execSecurityRoots finds the macOS list of trusted root certificates +// using only command-line tools. This is our fallback path when cgo isn't available. +// +// The strategy is as follows: +// +// 1. Run "security trust-settings-export" and "security +// trust-settings-export -d" to discover the set of certs with some +// user-tweaked trust policy. We're too lazy to parse the XML +// (Issue 26830) to understand what the trust +// policy actually is. We just learn that there is _some_ policy. +// +// 2. Run "security find-certificate" to dump the list of system root +// CAs in PEM format. +// +// 3. For each dumped cert, conditionally verify it with "security +// verify-cert" if that cert was in the set discovered in Step 1. +// Without the Step 1 optimization, running "security verify-cert" +// 150-200 times takes 3.5 seconds. 
With the optimization, the +// whole process takes about 180 milliseconds with 1 untrusted root +// CA. (Compared to 110ms in the cgo path) +func execSecurityRoots() (*CertPool, error) { + hasPolicy, err := getCertsWithTrustPolicy() + if err != nil { + return nil, err + } + if debugDarwinRoots { + fmt.Fprintf(os.Stderr, "crypto/x509: %d certs have a trust policy\n", len(hasPolicy)) + } + + keychains := []string{"/Library/Keychains/System.keychain"} + + // Note that this results in trusting roots from $HOME/... (the environment + // variable), which might not be expected. + u, err := user.Current() + if err != nil { + if debugDarwinRoots { + fmt.Fprintf(os.Stderr, "crypto/x509: can't get user home directory: %v\n", err) + } + } else { + keychains = append(keychains, + filepath.Join(u.HomeDir, "/Library/Keychains/login.keychain"), + + // Fresh installs of Sierra use a slightly different path for the login keychain + filepath.Join(u.HomeDir, "/Library/Keychains/login.keychain-db"), + ) + } + + type rootCandidate struct { + c *Certificate + system bool + } + + var ( + mu sync.Mutex + roots = NewCertPool() + numVerified int // number of execs of 'security verify-cert', for debug stats + wg sync.WaitGroup + verifyCh = make(chan rootCandidate) + ) + + // Using 4 goroutines to pipe into verify-cert seems to be + // about the best we can do. The verify-cert binary seems to + // just RPC to another server with coarse locking anyway, so + // running 16 at a time for instance doesn't help at all. Due + // to the "if hasPolicy" check below, though, we will rarely + // (or never) call verify-cert on stock macOS systems, though. + // The hope is that we only call verify-cert when the user has + // tweaked their trust policy. These 4 goroutines are only + // defensive in the pathological case of many trust edits. + for i := 0; i < 4; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for cert := range verifyCh { + sha1CapHex := fmt.Sprintf("%X", sha1.Sum(cert.c.Raw)) + + var valid bool + verifyChecks := 0 + if hasPolicy[sha1CapHex] { + verifyChecks++ + valid = verifyCertWithSystem(cert.c) + } else { + // Certificates not in SystemRootCertificates without user + // or admin trust settings are not trusted. + valid = cert.system + } + + mu.Lock() + numVerified += verifyChecks + if valid { + roots.AddCert(cert.c) + } + mu.Unlock() + } + }() + } + err = forEachCertInKeychains(keychains, func(cert *Certificate) { + verifyCh <- rootCandidate{c: cert, system: false} + }) + if err != nil { + close(verifyCh) + return nil, err + } + err = forEachCertInKeychains([]string{ + "/System/Library/Keychains/SystemRootCertificates.keychain", + }, func(cert *Certificate) { + verifyCh <- rootCandidate{c: cert, system: true} + }) + if err != nil { + close(verifyCh) + return nil, err + } + close(verifyCh) + wg.Wait() + + if debugDarwinRoots { + fmt.Fprintf(os.Stderr, "crypto/x509: ran security verify-cert %d times\n", numVerified) + } + + return roots, nil +} + +func forEachCertInKeychains(paths []string, f func(*Certificate)) error { + args := append([]string{"find-certificate", "-a", "-p"}, paths...) + cmd := exec.Command("/usr/bin/security", args...) 
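+ // e.g. /usr/bin/security find-certificate -a -p <keychain paths>, which
+ // prints every certificate in the given keychains in PEM form.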
+ data, err := cmd.Output() + if err != nil { + return err + } + for len(data) > 0 { + var block *pem.Block + block, data = pem.Decode(data) + if block == nil { + break + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + cert, err := ParseCertificate(block.Bytes) + if err != nil { + continue + } + f(cert) + } + return nil +} + +func verifyCertWithSystem(cert *Certificate) bool { + data := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", Bytes: cert.Raw, + }) + + f, err := os.CreateTemp("", "cert") + if err != nil { + fmt.Fprintf(os.Stderr, "can't create temporary file for cert: %v", err) + return false + } + defer os.Remove(f.Name()) + if _, err := f.Write(data); err != nil { + fmt.Fprintf(os.Stderr, "can't write temporary file for cert: %v", err) + return false + } + if err := f.Close(); err != nil { + fmt.Fprintf(os.Stderr, "can't write temporary file for cert: %v", err) + return false + } + cmd := exec.Command("/usr/bin/security", "verify-cert", "-p", "ssl", "-c", f.Name(), "-l", "-L") + var stderr bytes.Buffer + if debugDarwinRoots { + cmd.Stderr = &stderr + } + if err := cmd.Run(); err != nil { + if debugDarwinRoots { + fmt.Fprintf(os.Stderr, "crypto/x509: verify-cert rejected %s: %q\n", cert.Subject, bytes.TrimSpace(stderr.Bytes())) + } + return false + } + if debugDarwinRoots { + fmt.Fprintf(os.Stderr, "crypto/x509: verify-cert approved %s\n", cert.Subject) + } + return true +} + +// getCertsWithTrustPolicy returns the set of certs that have a +// possibly-altered trust policy. The keys of the map are capitalized +// sha1 hex of the raw cert. +// They are the certs that should be checked against `security +// verify-cert` to see whether the user altered the default trust +// settings. This code is only used for cgo-disabled builds. +func getCertsWithTrustPolicy() (map[string]bool, error) { + set := map[string]bool{} + td, err := os.MkdirTemp("", "x509trustpolicy") + if err != nil { + return nil, err + } + defer os.RemoveAll(td) + run := func(file string, args ...string) error { + file = filepath.Join(td, file) + args = append(args, file) + cmd := exec.Command("/usr/bin/security", args...) + var stderr bytes.Buffer + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + // If there are no trust settings, the + // `security trust-settings-export` command + // fails with: + // exit status 1, SecTrustSettingsCreateExternalRepresentation: No Trust Settings were found. + // Rather than match on English substrings that are probably + // localized on macOS, just interpret any failure to mean that + // there are no trust settings. + if debugDarwinRoots { + fmt.Fprintf(os.Stderr, "crypto/x509: exec %q: %v, %s\n", cmd.Args, err, stderr.Bytes()) + } + return nil + } + + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + + // Gather all the runs of 40 capitalized hex characters. 
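+ // (A run of exactly 40 hex digits is one capitalized SHA-1 fingerprint,
+ // matching the map keys described in the doc comment above.)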
+ br := bufio.NewReader(f) + var hexBuf bytes.Buffer + for { + b, err := br.ReadByte() + isHex := ('A' <= b && b <= 'F') || ('0' <= b && b <= '9') + if isHex { + hexBuf.WriteByte(b) + } else { + if hexBuf.Len() == 40 { + set[hexBuf.String()] = true + } + hexBuf.Reset() + } + if err == io.EOF { + break + } + if err != nil { + return err + } + } + + return nil + } + if err := run("user", "trust-settings-export"); err != nil { + return nil, fmt.Errorf("dump-trust-settings (user): %v", err) + } + if err := run("admin", "trust-settings-export", "-d"); err != nil { + return nil, fmt.Errorf("dump-trust-settings (admin): %v", err) + } + return set, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go new file mode 100644 index 00000000000..5c93349b0bc --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go @@ -0,0 +1,4314 @@ +// Code generated by root_darwin_arm_gen --output root_darwin_armx.go; DO NOT EDIT. + +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo && darwin && (arm || arm64 || ios) +// +build cgo +// +build darwin +// +build arm arm64 ios + +package x509 + +func loadSystemRoots() (*CertPool, error) { + p := NewCertPool() + p.AppendCertsFromPEM([]byte(systemRootsPEM)) + return p, nil +} + +const systemRootsPEM = ` +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj +YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM +GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua +BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe +3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 +YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR +rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm +ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU +oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v +QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t +b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF +AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q +GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 +G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi +l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 +smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE +BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w +MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC +SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1 +ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB 
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv +UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX +4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9 +KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/ +gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb +rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ +51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F +be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe +KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F +v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn +fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7 +jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz +ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL +e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 +jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz +WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V +SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3 +b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMw +MTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYD +VQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ul +CDtbKRY654eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6n +tGO0/7Gcrjyvd7ZWxbWroulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyl +dI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1Zmne3yzxbrww2ywkEtvrNTVokMsAsJch +PXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJuiGMx1I4S+6+JNM3GOGvDC ++Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8wHQYDVR0O +BBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBl +MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFk +ZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENB +IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxtZBsfzQ3duQH6lmM0MkhHma6X +7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0PhiVYrqW9yTkkz +43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJl +pz/+0WatC7xrmYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOA +WiFeIc9TVPC6b4nbqKqVz4vjccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs +IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290 +MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h 
+bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v +dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt +H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9 +uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX +mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX +a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN +E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0 +WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD +VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0 +Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU +cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx +IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN +AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH +YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC +Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX +c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a +mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFVTCCBD2gAwIBAgIEO/OB0DANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQGEwJj +aDEOMAwGA1UEChMFYWRtaW4xETAPBgNVBAsTCFNlcnZpY2VzMSIwIAYDVQQLExlD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRYwFAYDVQQDEw1BZG1pbi1Sb290LUNB +MB4XDTAxMTExNTA4NTEwN1oXDTIxMTExMDA3NTEwN1owbDELMAkGA1UEBhMCY2gx +DjAMBgNVBAoTBWFkbWluMREwDwYDVQQLEwhTZXJ2aWNlczEiMCAGA1UECxMZQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdGllczEWMBQGA1UEAxMNQWRtaW4tUm9vdC1DQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMvgr0QUIv5qF0nyXZ3PXAJi +C4C5Wr+oVTN7oxIkXkxvO0GJToM9n7OVJjSmzBL0zJ2HXj0MDRcvhSY+KiZZc6Go +vDvr5Ua481l7ILFeQAFtumeza+vvxeL5Nd0Maga2miiacLNAKXbAcUYRa0Ov5VZB +++YcOYNNt/aisWbJqA2y8He+NsEgJzK5zNdayvYXQTZN+7tVgWOck16Da3+4FXdy +fH1NCWtZlebtMKtERtkVAaVbiWW24CjZKAiVfggjsiLo3yVMPGj3budLx5D9hEEm +vlyDOtcjebca+AcZglppWMX/iHIrx7740y0zd6cWEqiLIcZCrnpkr/KzwO135GkC +AwEAAaOCAf0wggH5MA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIASBkTCBjjCBiwYI +YIV0AREDAQAwfzArBggrBgEFBQcCAjAfGh1UaGlzIGlzIHRoZSBBZG1pbi1Sb290 +LUNBIENQUzBQBggrBgEFBQcCARZEaHR0cDovL3d3dy5pbmZvcm1hdGlrLmFkbWlu +LmNoL1BLSS9saW5rcy9DUFNfMl8xNl83NTZfMV8xN18zXzFfMC5wZGYwfwYDVR0f +BHgwdjB0oHKgcKRuMGwxFjAUBgNVBAMTDUFkbWluLVJvb3QtQ0ExIjAgBgNVBAsT +GUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxETAPBgNVBAsTCFNlcnZpY2VzMQ4w +DAYDVQQKEwVhZG1pbjELMAkGA1UEBhMCY2gwHQYDVR0OBBYEFIKf+iNzIPGXi7JM +Tb5CxX9mzWToMIGZBgNVHSMEgZEwgY6AFIKf+iNzIPGXi7JMTb5CxX9mzWTooXCk +bjBsMQswCQYDVQQGEwJjaDEOMAwGA1UEChMFYWRtaW4xETAPBgNVBAsTCFNlcnZp +Y2VzMSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRYwFAYDVQQD +Ew1BZG1pbi1Sb290LUNBggQ784HQMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0B +AQUFAAOCAQEAeE96XCYRpy6umkPKXDWCRn7INo96ZrWpMggcDORuofHIwdTkgOeM +vWOxDN/yuT7CC3FAaUajbPRbDw0hRMcqKz0aC8CgwcyIyhw/rFK29mfNTG3EviP9 +QSsEbnelFnjpm1wjz4EaBiFjatwpUbI6+Zv3XbEt9QQXBn+c6DeFLe4xvC4B+MTr +a440xTk59pSYux8OHhEvqIwHCkiijGqZhTS3KmGFeBopaR+dJVBRBMoXwzk4B3Hn +0Zib1dEYFZa84vPJZyvxCbLOnPRDJgH6V2uQqbG+6DXVaf/wORVOvF/wzzv0viM/ +RWbEtJZdvo8N3sdtCULzifnxP/V0T9+4ZQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP 
+Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG 
+V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ +u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIIGDCCBgCgAwIBAgIGAT8vMXfmMA0GCSqGSIb3DQEBCwUAMIIBCjELMAkGA1UE +BhMCRVMxEjAQBgNVBAgMCUJhcmNlbG9uYTFYMFYGA1UEBwxPQmFyY2Vsb25hIChz +ZWUgY3VycmVudCBhZGRyZXNzIGF0IGh0dHA6Ly93d3cuYW5mLmVzL2VzL2FkZHJl +c3MtZGlyZWNjaW9uLmh0bWwgKTEnMCUGA1UECgweQU5GIEF1dG9yaWRhZCBkZSBD +ZXJ0aWZpY2FjaW9uMRcwFQYDVQQLDA5BTkYgQ2xhc2UgMSBDQTEaMBgGCSqGSIb3 +DQEJARYLaW5mb0BhbmYuZXMxEjAQBgNVBAUTCUc2MzI4NzUxMDEbMBkGA1UEAwwS +QU5GIEdsb2JhbCBSb290IENBMB4XDTEzMDYxMDE3NDUzOFoXDTMzMDYwNTE3NDUz +OFowggEKMQswCQYDVQQGEwJFUzESMBAGA1UECAwJQmFyY2Vsb25hMVgwVgYDVQQH +DE9CYXJjZWxvbmEgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgaHR0cDovL3d3dy5h +bmYuZXMvZXMvYWRkcmVzcy1kaXJlY2Npb24uaHRtbCApMScwJQYDVQQKDB5BTkYg +QXV0b3JpZGFkIGRlIENlcnRpZmljYWNpb24xFzAVBgNVBAsMDkFORiBDbGFzZSAx +IENBMRowGAYJKoZIhvcNAQkBFgtpbmZvQGFuZi5lczESMBAGA1UEBRMJRzYzMjg3 +NTEwMRswGQYDVQQDDBJBTkYgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQDHPi9xy4wynbcUbWjorVUgQKeUAVh937J7P37XmsfH +ZLOBZKIIlhhCtRwnDlg7x+BUvtJOTkIbEGMujDygUQ2s3HDYr5I41hTyM2Pl0cq2 +EuSGEbPIHb3dEX8NAguFexM0jqNjrreN3hM2/+TOkAxSdDJP2aMurlySC5zwl47K +ZLHtcVrkZnkDa0o5iN24hJT4vBDT4t2q9khQ+qb1D8KgCOb02r1PxWXu3vfd6Ha2 +mkdB97iGuEh5gO2n4yOmFS5goFlVA2UdPbbhJsb8oKVKDd+YdCKGQDCkQyG4AjmC +YiNm3UPG/qtftTH5cWri67DlLtm6fyUFOMmO6NSh0RtR745pL8GyWJUanyq/Q4bF +HQB21E+WtTsCaqjGaoFcrBunMypmCd+jUZXl27TYENRFbrwNdAh7m2UztcIyb+Sg +VJFyfvVsBQNvnp7GPimVxXZNc4VpxEXObRuPWQN1oZN/90PcZVqTia/SHzEyTryL +ckhiLG3jZiaFZ7pTZ5I9wti9Pn+4kOHvE3Y/4nEnUo4mTxPX9pOlinF+VCiybtV2 +u1KSlc+YaIM7VmuyndDZCJRXm3v0/qTE7t5A5fArZl9lvibigMbWB8fpD+c1GpGH +Eo8NRY0lkaM+DkIqQoaziIsz3IKJrfdKaq9bQMSlIfameKBZ8fNYTBZrH9KZAIhz +YwIDAQABo4IBfjCCAXowHQYDVR0OBBYEFIf6nt9SdnXsSUogb1twlo+d77sXMB8G +A1UdIwQYMBaAFIf6nt9SdnXsSUogb1twlo+d77sXMA8GA1UdEwEB/wQFMAMBAf8w +DgYDVR0PAQH/BAQDAgEGMIIBFQYDVR0RBIIBDDCCAQiCEWh0dHA6Ly93d3cuYW5m +LmVzgQtpbmZvQGFuZi5lc6SB5TCB4jE0MDIGA1UECQwrR3JhbiBWaWEgZGUgbGVz +IENvcnRzIENhdGFsYW5lcy4gOTk2LiAwODAxODESMBAGA1UEBwwJQmFyY2Vsb25h +MScwJQYDVQQKDB5BTkYgQXV0b3JpZGFkIGRlIENlcnRpZmljYWNpb24xEjAQBgNV +BAUTCUc2MzI4NzUxMDFZMFcGA1UECwxQSW5zY3JpdGEgZW4gZWwgTWluaXN0ZXJp +byBkZWwgSW50ZXJpb3IgZGUgRXNwYcOxYSBjb24gZWwgbnVtZXJvIG5hY2lvbmFs +IDE3MS40NDMwDQYJKoZIhvcNAQELBQADggIBAIgR9tFTZ9BCYg+HViMxOfF0MHN2 +Pe/eC128ARdS+GH8A4thtbqiH/SOYbWofO/0zssHhNKa5iQEj45lCAb8BANpWJMD +nWkPr6jq2+50a6d0MMgSS2l1rvjSF+3nIrEuicshHXSTi3q/vBLKr7uGKMVFaM68 
+XAropIwk6ndlA0JseARSPsbetv7ALESMIZAxlHV1TcctYHd0bB3c/Jz+PLszJQqs +Cg/kBPo2D111OXZkIY8W/fJuG9veR783khAK2gUnC0zLLCNsYzEbdGt8zUmBsAsM +cGxqGm6B6vDXd65OxWqw13xdq/24+5R8Ng1PF9tvfjZkUFBF30CxjWur7P90WiKI +G7IGfr6BE1NgXlhEQQu4F+HizB1ypEPzGWltecXQ4yOzO+H0WfFTjLTYX6VSveyW +DQV18ixF8M4tHP/SwNE+yyv2b2JJ3/3RpxjtFlLk+opJ574x0gD/dMJuWTH0JqVY +3PbRfE1jIxFpk164Qz/Xp7H7w7f6xh+tQCkBs3PUYmnGIZcPwq44Q6JHlCNsKx4K +hxfggTvRCk4w79cUID45c2qDsRCqTPoOo/cbOpcfVhbH9LdMORpmuLwNogRZEUSE +fWpqR9q+0kcQf4zGSWIURIyDrogdpDgoHDxktqgMgc+qA4ZE2WQl1D8hmev53A46 +lUSrWUiWfDXtK3ux +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIIAeDltYNno+AwDQYJKoZIhvcNAQEMBQAwZzEbMBkGA1UE +AwwSQXBwbGUgUm9vdCBDQSAtIEcyMSYwJAYDVQQLDB1BcHBsZSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eTETMBEGA1UECgwKQXBwbGUgSW5jLjELMAkGA1UEBhMCVVMw +HhcNMTQwNDMwMTgxMDA5WhcNMzkwNDMwMTgxMDA5WjBnMRswGQYDVQQDDBJBcHBs +ZSBSb290IENBIC0gRzIxJjAkBgNVBAsMHUFwcGxlIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MRMwEQYDVQQKDApBcHBsZSBJbmMuMQswCQYDVQQGEwJVUzCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBANgREkhI2imKScUcx+xuM23+TfvgHN6s +XuI2pyT5f1BrTM65MFQn5bPW7SXmMLYFN14UIhHF6Kob0vuy0gmVOKTvKkmMXT5x +ZgM4+xb1hYjkWpIMBDLyyED7Ul+f9sDx47pFoFDVEovy3d6RhiPw9bZyLgHaC/Yu +OQhfGaFjQQscp5TBhsRTL3b2CtcM0YM/GlMZ81fVJ3/8E7j4ko380yhDPLVoACVd +J2LT3VXdRCCQgzWTxb+4Gftr49wIQuavbfqeQMpOhYV4SbHXw8EwOTKrfl+q04tv +ny0aIWhwZ7Oj8ZhBbZF8+NfbqOdfIRqMM78xdLe40fTgIvS/cjTf94FNcX1RoeKz +8NMoFnNvzcytN31O661A4T+B/fc9Cj6i8b0xlilZ3MIZgIxbdMYs0xBTJh0UT8TU +gWY8h2czJxQI6bR3hDRSj4n4aJgXv8O7qhOTH11UL6jHfPsNFL4VPSQ08prcdUFm +IrQB1guvkJ4M6mL4m1k8COKWNORj3rw31OsMiANDC1CvoDTdUE0V+1ok2Az6DGOe +HwOx4e7hqkP0ZmUoNwIx7wHHHtHMn23KVDpA287PT0aLSmWaasZobNfMmRtHsHLD +d4/E92GcdB/O/WuhwpyUgquUoue9G7q5cDmVF8Up8zlYNPXEpMZ7YLlmQ1A/bmH8 +DvmGqmAMQ0uVAgMBAAGjQjBAMB0GA1UdDgQWBBTEmRNsGAPCe8CjoA1/coB6HHcm +jTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQwF +AAOCAgEAUabz4vS4PZO/Lc4Pu1vhVRROTtHlznldgX/+tvCHM/jvlOV+3Gp5pxy+ +8JS3ptEwnMgNCnWefZKVfhidfsJxaXwU6s+DDuQUQp50DhDNqxq6EWGBeNjxtUVA +eKuowM77fWM3aPbn+6/Gw0vsHzYmE1SGlHKy6gLti23kDKaQwFd1z4xCfVzmMX3z +ybKSaUYOiPjjLUKyOKimGY3xn83uamW8GrAlvacp/fQ+onVJv57byfenHmOZ4VxG +/5IFjPoeIPmGlFYl5bRXOJ3riGQUIUkhOb9iZqmxospvPyFgxYnURTbImHy99v6Z +SYA7LNKmp4gDBDEZt7Y6YUX6yfIjyGNzv1aJMbDZfGKnexWoiIqrOEDCzBL/FePw +N983csvMmOa/orz6JopxVtfnJBtIRD6e/J/JzBrsQzwBvDR4yGn1xuZW7AYJNpDr +FEobXsmII9oDMJELuDY++ee1KG++P+w8j2Ud5cAeh6Squpj9kuNsJnfdBrRkBof0 +Tta6SqoWqPQFZ2aWuuJVecMsXUmPgEkrihLHdoBR37q9ZV0+N0djMenl9MU/S60E +inpxLK8JQzcPqOMyT/RFtm2XNuyE9QoB6he7hY1Ck3DDUOUUi78/w0EP3SIEIwiK +um1xRKtzCTrJ+VKACd+66eYWyi4uTLLT3OUEVLLUNIAytbwPF+E= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICQzCCAcmgAwIBAgIILcX8iNLFS5UwCgYIKoZIzj0EAwMwZzEbMBkGA1UEAwwS +QXBwbGUgUm9vdCBDQSAtIEczMSYwJAYDVQQLDB1BcHBsZSBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTETMBEGA1UECgwKQXBwbGUgSW5jLjELMAkGA1UEBhMCVVMwHhcN +MTQwNDMwMTgxOTA2WhcNMzkwNDMwMTgxOTA2WjBnMRswGQYDVQQDDBJBcHBsZSBS +b290IENBIC0gRzMxJjAkBgNVBAsMHUFwcGxlIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRMwEQYDVQQKDApBcHBsZSBJbmMuMQswCQYDVQQGEwJVUzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABJjpLz1AcqTtkyJygRMc3RCV8cWjTnHcFBbZDuWmBSp3ZHtf +TjjTuxxEtX/1H7YyYl3J6YRbTzBPEVoA/VhYDKX1DyxNB0cTddqXl5dvMVztK517 +IDvYuVTZXpmkOlEKMaNCMEAwHQYDVR0OBBYEFLuw3qFYM4iapIqZ3r6966/ayySr +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMAoGCCqGSM49BAMDA2gA +MGUCMQCD6cHEFl4aXTQY2e3v9GwOAEZLuN+yRhHFD/3meoyhpmvOwgPUnPWTxnS4 +at+qIxUCMG1mihDK1A3UT82NQz60imOlM27jbdoXt2QfyFMm+YhidDkLF1vLUagM +6BgD56KyKA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEuzCCA6OgAwIBAgIBAjANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQGEwJVUzET 
+MBEGA1UEChMKQXBwbGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxFjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0EwHhcNMDYwNDI1MjE0 +MDM2WhcNMzUwMjA5MjE0MDM2WjBiMQswCQYDVQQGEwJVUzETMBEGA1UEChMKQXBw +bGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkx +FjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDkkakJH5HbHkdQ6wXtXnmELes2oldMVeyLGYne+Uts9QerIjAC6Bg+ ++FAJ039BqJj50cpmnCRrEdCju+QbKsMflZ56DKRHi1vUFjczy8QPTc4UadHJGXL1 +XQ7Vf1+b8iUDulWPTV0N8WQ1IxVLFVkds5T39pyez1C6wVhQZ48ItCD3y6wsIG9w +tj8BMIy3Q88PnT3zK0koGsj+zrW5DtleHNbLPbU6rfQPDgCSC7EhFi501TwN22IW +q6NxkkdTVcGvL0Gz+PvjcM3mo0xFfh9Ma1CWQYnEdGILEINBhzOKgbEwWOxaBDKM +aLOPHd5lc/9nXmW8Sdh2nzMUZaF3lMktAgMBAAGjggF6MIIBdjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUK9BpR5R2Cf70a40uQKb3 +R01/CF4wHwYDVR0jBBgwFoAUK9BpR5R2Cf70a40uQKb3R01/CF4wggERBgNVHSAE +ggEIMIIBBDCCAQAGCSqGSIb3Y2QFATCB8jAqBggrBgEFBQcCARYeaHR0cHM6Ly93 +d3cuYXBwbGUuY29tL2FwcGxlY2EvMIHDBggrBgEFBQcCAjCBthqBs1JlbGlhbmNl +IG9uIHRoaXMgY2VydGlmaWNhdGUgYnkgYW55IHBhcnR5IGFzc3VtZXMgYWNjZXB0 +YW5jZSBvZiB0aGUgdGhlbiBhcHBsaWNhYmxlIHN0YW5kYXJkIHRlcm1zIGFuZCBj +b25kaXRpb25zIG9mIHVzZSwgY2VydGlmaWNhdGUgcG9saWN5IGFuZCBjZXJ0aWZp +Y2F0aW9uIHByYWN0aWNlIHN0YXRlbWVudHMuMA0GCSqGSIb3DQEBBQUAA4IBAQBc +NplMLXi37Yyb3PN3m/J20ncwT8EfhYOFG5k9RzfyqZtAjizUsZAS2L70c5vu0mQP +y3lPNNiiPvl4/2vIB+x9OYOLUyDTOMSxv5pPCmv/K/xZpwUJfBdAVhEedNO3iyM7 +R6PVbyTi69G3cN8PReEnyvFteO3ntRcXqNx+IjXKJdXZD9Zr1KIkIxH3oayPc4Fg +xhtbCS+SsvhESPBgOJ4V9T0mZyCKM2r3DYLP3uujL/lTaltkwGMzd/c6ByxW69oP +IQ7aunMZT7XZNn/Bh1XZp5m5MkL72NVxnn6hUrcbvZNCJBIqxw8dtk2cXmPIS4AX +UKqK1drk/NAJBzewdXUh +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFujCCBKKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhjELMAkGA1UEBhMCVVMx +HTAbBgNVBAoTFEFwcGxlIENvbXB1dGVyLCBJbmMuMS0wKwYDVQQLEyRBcHBsZSBD +b21wdXRlciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxKTAnBgNVBAMTIEFwcGxlIFJv +b3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4XDTA1MDIxMDAwMTgxNFoXDTI1MDIx +MDAwMTgxNFowgYYxCzAJBgNVBAYTAlVTMR0wGwYDVQQKExRBcHBsZSBDb21wdXRl +ciwgSW5jLjEtMCsGA1UECxMkQXBwbGUgQ29tcHV0ZXIgQ2VydGlmaWNhdGUgQXV0 +aG9yaXR5MSkwJwYDVQQDEyBBcHBsZSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOSRqQkfkdseR1DrBe1e +eYQt6zaiV0xV7IsZid75S2z1B6siMALoGD74UAnTf0GomPnRymacJGsR0KO75Bsq +wx+VnnoMpEeLW9QWNzPLxA9NzhRp0ckZcvVdDtV/X5vyJQO6VY9NXQ3xZDUjFUsV +WR2zlPf2nJ7PULrBWFBnjwi0IPfLrCwgb3C2PwEwjLdDzw+dPfMrSSgayP7OtbkO +2V4c1ss9tTqt9A8OAJILsSEWLnTVPA3bYharo3GSR1NVwa8vQbP4++NwzeajTEV+ +H0xrUJZBicR0YgsQg0GHM4qBsTBY7FoEMoxos48d3mVz/2deZbxJ2HafMxRloXeU +yS0CAwEAAaOCAi8wggIrMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ +MB0GA1UdDgQWBBQr0GlHlHYJ/vRrjS5ApvdHTX8IXjAfBgNVHSMEGDAWgBQr0GlH +lHYJ/vRrjS5ApvdHTX8IXjCCASkGA1UdIASCASAwggEcMIIBGAYJKoZIhvdjZAUB +MIIBCTBBBggrBgEFBQcCARY1aHR0cHM6Ly93d3cuYXBwbGUuY29tL2NlcnRpZmlj +YXRlYXV0aG9yaXR5L3Rlcm1zLmh0bWwwgcMGCCsGAQUFBwICMIG2GoGzUmVsaWFu +Y2Ugb24gdGhpcyBjZXJ0aWZpY2F0ZSBieSBhbnkgcGFydHkgYXNzdW1lcyBhY2Nl +cHRhbmNlIG9mIHRoZSB0aGVuIGFwcGxpY2FibGUgc3RhbmRhcmQgdGVybXMgYW5k +IGNvbmRpdGlvbnMgb2YgdXNlLCBjZXJ0aWZpY2F0ZSBwb2xpY3kgYW5kIGNlcnRp +ZmljYXRpb24gcHJhY3RpY2Ugc3RhdGVtZW50cy4wRAYDVR0fBD0wOzA5oDegNYYz +aHR0cHM6Ly93d3cuYXBwbGUuY29tL2NlcnRpZmljYXRlYXV0aG9yaXR5L3Jvb3Qu +Y3JsMFUGCCsGAQUFBwEBBEkwRzBFBggrBgEFBQcwAoY5aHR0cHM6Ly93d3cuYXBw +bGUuY29tL2NlcnRpZmljYXRlYXV0aG9yaXR5L2Nhc2lnbmVycy5odG1sMA0GCSqG +SIb3DQEBBQUAA4IBAQCd2i0oWC99dgS5BNM+zrdmY06PL9T+S61yvaM5xlJNBZhS +9YlRASR5vhoy9+VEi0tEBzmC1lrKtCBe2a4VXR2MHTK/ODFiSF3H4ZCx+CRA+F9Y +m1FdV53B5f88zHIhbsTp6aF31ywXJsM/65roCwO66bNKcuszCVut5mIxauivL9Wv 
+Hld2j383LS4CXN1jyfJxuCZA3xWNdUQ/eb3mHZnhQyw+rW++uaT+DjUZUWOxw961 +kj5ReAFziqQjyqSI8R5cH0EWLX6VCqrpiUGYGxrdyyC/R14MJsVVNU3GMIuZZxTH +CR+6R8faAQmHJEKVvRNgGQrv6n8Obs3BREM6StXj +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID9zCCAt+gAwIBAgILMTI1MzcyODI4MjgwDQYJKoZIhvcNAQELBQAwWDELMAkG +A1UEBhMCSlAxHDAaBgNVBAoTE0phcGFuZXNlIEdvdmVybm1lbnQxDTALBgNVBAsT +BEdQS0kxHDAaBgNVBAMTE0FwcGxpY2F0aW9uQ0EyIFJvb3QwHhcNMTMwMzEyMTUw +MDAwWhcNMzMwMzEyMTUwMDAwWjBYMQswCQYDVQQGEwJKUDEcMBoGA1UEChMTSmFw +YW5lc2UgR292ZXJubWVudDENMAsGA1UECxMER1BLSTEcMBoGA1UEAxMTQXBwbGlj +YXRpb25DQTIgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKaq +rSVl1gAR1uh6dqr05rRL88zDUrSNrKZPtZJxb0a11a2LEiIXJc5F6BR6hZrkIxCo ++rFnUOVtR+BqiRPjrq418fRCxQX3TZd+PCj8sCaRHoweOBqW3FhEl2LjMsjRFUFN +dZh4vqtoqV7tR76kuo6hApfek3SZbWe0BSXulMjtqqS6MmxCEeu+yxcGkOGThchk +KM4fR8fAXWDudjbcMztR63vPctgPeKgZggiQPhqYjY60zxU2pm7dt+JNQCBT2XYq +0HisifBPizJtROouurCp64ndt295D6uBbrjmiykLWa+2SQ1RLKn9nShjZrhwlXOa +2Po7M7xCQhsyrLEy+z0CAwEAAaOBwTCBvjAdBgNVHQ4EFgQUVqesqgIdsqw9kA6g +by5Bxnbne9owDgYDVR0PAQH/BAQDAgEGMHwGA1UdEQR1MHOkcTBvMQswCQYDVQQG +EwJKUDEYMBYGA1UECgwP5pel5pys5Zu95pS/5bqcMRswGQYDVQQLDBLmlL/lupzo +qo3oqLzln7rnm6QxKTAnBgNVBAMMIOOCouODl+ODquOCseODvOOCt+ODp+ODs0NB +MiBSb290MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAH+aCXWs +B9FydC53VzDCBJzUgKaD56WgG5/+q/OAvdVKo6GPtkxgEefK4WCB10jBIFmlYTKL +nZ6X02aD2mUuWD7b5S+lzYxzplG+WCigeVxpL0PfY7KJR8q73rk0EWOgDiUX5Yf0 +HbCwpc9BqHTG6FPVQvSCLVMJEWgmcZR1E02qdog8dLHW40xPYsNJTE5t8XB+w3+m +Bcx4m+mB26jIx1ye/JKSLaaX8ji1bnOVDMA/zqaUMLX6BbfeniCq/BNkyYq6ZO/i +Y+TYmK5rtT6mVbgzPixy+ywRAPtbFi+E0hOe+gXFwctyTiLdhMpLvNIthhoEdlkf +SUJiOxMfFui61/0= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG +EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ +4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L +cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi +eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG +A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 +DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j +vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP +DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc +maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D +lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv +KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy +MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG 
+L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD +VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv +ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl +AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF +661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9 +am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1 +ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481 +PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS +3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k +SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF +3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM +ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g +StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz +Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB +jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIJmzCCB4OgAwIBAgIBATANBgkqhkiG9w0BAQwFADCCAR4xPjA8BgNVBAMTNUF1 +dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9s +YW5vMQswCQYDVQQGEwJWRTEQMA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlz +dHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0 +aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBlcmludGVuZGVuY2lh +IGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUwIwYJ +KoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyMjE4MDgy +MVoXDTMwMTIxNzIzNTk1OVowggEeMT4wPAYDVQQDEzVBdXRvcmlkYWQgZGUgQ2Vy +dGlmaWNhY2lvbiBSYWl6IGRlbCBFc3RhZG8gVmVuZXpvbGFubzELMAkGA1UEBhMC +VkUxEDAOBgNVBAcTB0NhcmFjYXMxGTAXBgNVBAgTEERpc3RyaXRvIENhcGl0YWwx +NjA0BgNVBAoTLVNpc3RlbWEgTmFjaW9uYWwgZGUgQ2VydGlmaWNhY2lvbiBFbGVj +dHJvbmljYTFDMEEGA1UECxM6U3VwZXJpbnRlbmRlbmNpYSBkZSBTZXJ2aWNpb3Mg +ZGUgQ2VydGlmaWNhY2lvbiBFbGVjdHJvbmljYTElMCMGCSqGSIb3DQEJARYWYWNy +YWl6QHN1c2NlcnRlLmdvYi52ZTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC +ggIBAME77xNS8ZlW47RsBeEaaRZhJoZ4rw785UAFCuPZOAVMqNS1wMYqzy95q6Gk +UO81ER/ugiQX/KMcq/4HBn83fwdYWxPZfwBfK7BP2p/JsFgzYeFP0BXOLmvoJIzl +Jb6FW+1MPwGBjuaZGFImWZsSmGUclb51mRYMZETh9/J5CLThR1exStxHQptwSzra +zNFpkQY/zmj7+YZNA9yDoroVFv6sybYOZ7OxNDo7zkSLo45I7gMwtxqWZ8VkJZkC +8+p0dX6mkhUT0QAV64Zc9HsZiH/oLhEkXjhrgZ28cF73MXIqLx1fyM4kPH1yOJi/ +R72nMwL7D+Sd6mZgI035TxuHXc2/uOwXfKrrTjaJDz8Jp6DdessOkxIgkKXRjP+F +K3ze3n4NUIRGhGRtyvEjK95/2g02t6PeYiYVGur6ruS49n0RAaSS0/LJb6XzaAAe +0mmO2evnEqxIKwy2mZRNPfAVW1l3wCnWiUwryBU6OsbFcFFrQm+00wOicXvOTHBM +aiCVAVZTb9RSLyi+LJ1llzJZO3pq3IRiiBj38Nooo+2ZNbMEciSgmig7YXaUcmud +SVQvLSL+Yw+SqawyezwZuASbp7d/0rutQ59d81zlbMt3J7yB567rT2IqIydQ8qBW +k+fmXzghX+/FidYsh/aK+zZ7Wy68kKHuzEw1Vqkat5DGs+VzAgMBAAGjggLeMIIC +2jASBgNVHRMBAf8ECDAGAQH/AgECMDcGA1UdEgQwMC6CD3N1c2NlcnRlLmdvYi52 +ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0wMB0GA1UdDgQWBBStuyIdxuDS +Aaj9dlBSk+2YwU2u0zCCAVAGA1UdIwSCAUcwggFDgBStuyIdxuDSAaj9dlBSk+2Y 
+wU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0b3JpZGFkIGRlIENlcnRpZmlj +YWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xhbm8xCzAJBgNVBAYTAlZFMRAw +DgYDVQQHEwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0cml0byBDYXBpdGFsMTYwNAYD +VQQKEy1TaXN0ZW1hIE5hY2lvbmFsIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25p +Y2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5kZW5jaWEgZGUgU2VydmljaW9zIGRlIENl +cnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkqhkiG9w0BCQEWFmFjcmFpekBz +dXNjZXJ0ZS5nb2IudmWCAQEwDgYDVR0PAQH/BAQDAgEGMDcGA1UdEQQwMC6CD3N1 +c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0wMFQGA1Ud +HwRNMEswJKAioCCGHmhodHA6Ly93d3cuc3VzY2VydGUuZ29iLnZlL2xjcjAjoCGg +H4YdbGRhcDovL2FjcmFpei5zdXNjZXJ0ZS5nb2IudmUwNwYIKwYBBQUHAQEEKzAp +MCcGCCsGAQUFBzABhhtoaHRwOi8vb2NzcC5zdXNjZXJ0ZS5nb2IudmUwQAYDVR0g +BDkwNzA1BgVghl4BAjAsMCoGCCsGAQUFBwIBFh5odHRwOi8vd3d3LnN1c2NlcnRl +LmdvYi52ZS9kcGMwDQYJKoZIhvcNAQEMBQADggIBAK4qy/zmZ9zBwfW3yOYtLcBT +Oy4szJyPz7/RhNH3bPVH7HbDTGpi6JZ4YXdXMBeJE5qBF4a590Kgj8Rlnltt+Rbo +OFQOU1UDqKuTdBsA//Zry5899fmn8jBUkg4nh09jhHHbLlaUScdz704Zz2+UVg7i +s/r3Legxap60KzmdrmTAE9VKte1TQRgavQwVX5/2mO/J+SCas//UngI+h8SyOucq +mjudYEgBrZaodUsagUfn/+AzFNrGLy+al+5nZeHb8JnCfLHWS0M9ZyhgoeO/czyn +99+5G93VWNv4zfc4KiavHZKrkn8F9pg0ycIZh+OwPT/RE2zq4gTazBMlP3ACIe/p +olkNaOEa8KvgzW96sjBZpMW49zFmyINYkcj+uaNCJrVGsXgdBmkuRGJNWFZ9r0cG +woIaxViFBypsz045r1ESfYPlfDOavBhZ/giR/Xocm9CHkPRY2BApMMR0DUCyGETg +Ql+L3kfdTKzuDjUp2DM9FqysQmaM81YDZufWkMhlZPfHwC7KbNougoLroa5Umeos +bqAXWmk46SwIdWRPLLqbUpDTKooynZKpSYIkkotdgJoVZUUCY+RCO8jsVPEU6ece +SxztNUm5UOta1OJPMwSAKRHOo3ilVb9c6lAixDdvV8MeNbqe6asM1mpCHWbJ/0rg +5Ls9Cxx8hracyp0ev7b0 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ +RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD +VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX +DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y +ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy +VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr +mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr +IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK +mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu +XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy +dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye +jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 +BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 +DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 +9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx +jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 +Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz +ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS +R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIIKv++n6Lw6YcwDQYJKoZIhvcNAQEFBQAwKDELMAkGA1UE +BhMCQkUxGTAXBgNVBAMTEEJlbGdpdW0gUm9vdCBDQTIwHhcNMDcxMDA0MTAwMDAw +WhcNMjExMjE1MDgwMDAwWjAoMQswCQYDVQQGEwJCRTEZMBcGA1UEAxMQQmVsZ2l1 +bSBSb290IENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMZzQh6S +/3UPi790hqc/7bIYLS2X+an7mEoj39WN4IzGMhwWLQdC1i22bi+n9fzGhYJdld61 +IgDMqFNAn68KNaJ6x+HK92AQZw6nUHMXU5WfIp8MXW+2QbyM69odRr2nlL/zGsvU ++40OHjPIltfsjFPekx40HopQcSZYtF3CiInaYNKJIT/e1wEYNm7hLHADBGXvmAYr +XR5i3FVr/mZkIV/4L+HXmymvb82fqgxG0YjFnaKVn6w/Fa7yYd/vw2uaItgscf1Y +HewApDgglVrH1Tdjuk+bqv5WRi5j2Qsj1Yr6tSPwiRuhFA0m2kHwOI8w7QUmecFL +TqG4flVSOmlGhHUCAwEAAaOBuzCBuDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ 
+BAUwAwEB/zBCBgNVHSAEOzA5MDcGBWA4CQEBMC4wLAYIKwYBBQUHAgEWIGh0dHA6 +Ly9yZXBvc2l0b3J5LmVpZC5iZWxnaXVtLmJlMB0GA1UdDgQWBBSFiuv0xbu+DlkD +lN7WgAEV4xCcOTARBglghkgBhvhCAQEEBAMCAAcwHwYDVR0jBBgwFoAUhYrr9MW7 +vg5ZA5Te1oABFeMQnDkwDQYJKoZIhvcNAQEFBQADggEBAFHYhd27V2/MoGy1oyCc +UwnzSgEMdL8rs5qauhjyC4isHLMzr87lEwEnkoRYmhC598wUkmt0FoqW6FHvv/pK +JaeJtmMrXZRY0c8RcrYeuTlBFk0pvDVTC9rejg7NqZV3JcqUWumyaa7YwBO+mPyW +nIR/VRPmPIfjvCCkpDZoa01gZhz5v6yAlGYuuUGK02XThIAC71AdXkbc98m6tTR8 +KvPG2F9fVJ3bTc0R5/0UAoNmXsimABKgX77OFP67H6dh96tK8QYUn8pJQsKpvO2F +sauBQeYNxUJpU4c5nUwfAA4+Bw11V0SoU7Q2dmSZ3G7rPUZuFF1eR1ONeE3gJ7uO +hXY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr +6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz +uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h +9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c +/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X +KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY +zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS +O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D 
+34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv +033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQy +MDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjEw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy3QRk +D2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/o +OI7bm+V8u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3A +fQ+lekLZWnDZv6fXARz2m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJe +IgpFy4QxTaz+29FHuvlglzmxZcfe+5nkCiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8n +oc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTaYVKvJrT1cU/J19IG32PK +/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6vpmumwKj +rckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD +3AjLLhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE +7cderVC6xkGbrPAXZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkC +yC2fg69naQanMVXVz0tv/wQFx1isXxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLd +qvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ04IwDQYJKoZI +hvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR +xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaA +SfX8MPWbTx9BLxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXo +HqJPYNcHKfyyo6SdbhWSVhlMCrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpB +emOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5GfbVSUZP/3oNn6z4eGBrxEWi1CXYBmC +AMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85YmLLW1AL14FABZyb +7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKSds+x +DzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvk +F7mGnjixlAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqF +a3qdnom2piiZk4hA9z7NUaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsT +Q6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJa7+h89n07eLw4+1knj0vllJPgFOL +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy +MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe +NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH +PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I 
+x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe +QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR +yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO +QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 +H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ +QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD +i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs +nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 +rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI +hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf +GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb +lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka ++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal +TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i +nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 +gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr +G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os +zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x +L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV +BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X +DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ +BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4 +QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny +gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw +zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q +130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2 +JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw +ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT +AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj +AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG +9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h +bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc +fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu +HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w +t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjET +MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAk +BgNVBAMMHUNlcnRpbm9taXMgLSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4 +Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNl +cnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYwJAYDVQQDDB1DZXJ0 +aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jY +F1AMnmHawE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N +8y4oH3DfVS9O7cdxbwlyLu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWe +rP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K +/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92NjMD2AR5vpTESOH2VwnHu 
+7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9qc1pkIuVC +28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6 +lSTClrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1E +nn1So2+WLhl+HPNbxxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB +0iSVL1N6aaLwD4ZFjliCK0wi1F6g530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql09 +5gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna4NH4+ej9Uji29YnfAgMBAAGj +WzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBQN +jLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9s +ov3/4gbIOZ/xWqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZM +OH8oMDX/nyNTt7buFHAAQCvaR6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q +619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40nJ+U8/aGH88bc62UeYdocMMzpXDn +2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1BCxMjidPJC+iKunqj +o3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjvJL1v +nxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG +5ERQL1TEqkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWq +pdEdnV1j6CTmNhTih60bWfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZb +dsLLO7XSAPCjDuGtbkD326C00EauFddEwk01+dIL8hf2rGbVJLJP0RyZwG71fet0 +BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET +MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb +BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz +MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx +FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g +Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2 +fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl +LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV +WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF +TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb +5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc +CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri +wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ +wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG +m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4 +F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng +WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0 +2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF +AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/ +0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw +F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS +g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj +qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN +h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/ +ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V +btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj +Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ +8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW +gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT +AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD +QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP 
+MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do +0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ +UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d +RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ +OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv +JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C +AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O +BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ +LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY +MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ +44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I +Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw +i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN +9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBM +MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD +QTAeFw0wMjA2MTExMDQ2MzlaFw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBM +MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD +QTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6xwS7TT3zNJc4YPk/E +jG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdLkKWo +ePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GI +ULdtlkIJ89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapu +Ob7kky/ZR6By6/qmW6/KUz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUg +AKpoC6EahQGcxEZjgoi2IrHu/qpGWX7PNSzVttpd90gzFFS269lvzs2I1qsb2pY7 +HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEA +uI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+GXYkHAQa +TOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTg +xSvgGrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1q +CjqTE5s7FCMTY5w/0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5x +O/fIR/RpbxXyEV6DHpx8Uq79AtoSqFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs +6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB +gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu +QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG +A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz +OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ +VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 +b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA +DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn +0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB +OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE +fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E +Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m +o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i +sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW +OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez +Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS +adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n +3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ 
+F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf +CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 +XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm +djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ +WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb +AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq +P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko +b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj +XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P +5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi +DrW5viSP +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM +MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D +ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU +cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3 +WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg +Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw +IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH +UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM +TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU +BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM +kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x +AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y +sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL +I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8 +J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY +VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD +TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx +MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j +aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP +T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03 +sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL +TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5 +/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp +7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz +EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt +hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP +a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot +aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg +TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV +PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv +cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL +tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd +BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT +ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL +jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS 
+ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy +P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe +/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz +IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz +MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj +dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw +EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp +MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9 +28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq +VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q +DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR +5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL +ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a +Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl +UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s ++12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5 +Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx +hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV +HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1 ++HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN +YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t +L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy +ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt +IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV +HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w +DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW +PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF +5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1 +glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH +FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2 +pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD +xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG +tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq +jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De +fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ +d0jQ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEn +MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL +ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMg +b2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAxNjEzNDNaFw0zNzA5MzAxNjEzNDRa +MH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZpcm1hIFNBIENJRiBB 
+ODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3JnMSIw +IAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0B +AQEFAAOCAQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtb +unXF/KGIJPov7coISjlUxFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0d +BmpAPrMMhe5cG3nCYsS4No41XQEMIwRHNaqbYE6gZj3LJgqcQKH0XZi/caulAGgq +7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jWDA+wWFjbw2Y3npuRVDM3 +0pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFVd9oKDMyX +roDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIG +A1UdEwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5j +aGFtYmVyc2lnbi5vcmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p +26EpW1eLTXYGduHRooowDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIA +BzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hhbWJlcnNpZ24ub3JnMCcGA1Ud +EgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYDVR0gBFEwTzBN +BgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEB +AAxBl8IahsAifJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZd +p0AJPaxJRUXcLo0waLIJuvvDL8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi +1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wNUPf6s+xCX6ndbcj0dc97wXImsQEc +XCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/nADydb47kMgkdTXg0 +eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1erfu +tGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDQzCCAiugAwIBAgIQX/h7KCtU3I1CoxW1aMmt/zANBgkqhkiG9w0BAQUFADA1 +MRYwFAYDVQQKEw1DaXNjbyBTeXN0ZW1zMRswGQYDVQQDExJDaXNjbyBSb290IENB +IDIwNDgwHhcNMDQwNTE0MjAxNzEyWhcNMjkwNTE0MjAyNTQyWjA1MRYwFAYDVQQK +Ew1DaXNjbyBTeXN0ZW1zMRswGQYDVQQDExJDaXNjbyBSb290IENBIDIwNDgwggEg +MA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCwmrmrp68Kd6ficba0ZmKUeIhH +xmJVhEAyv8CrLqUccda8bnuoqrpu0hWISEWdovyD0My5jOAmaHBKeN8hF570YQXJ +FcjPFto1YYmUQ6iEqDGYeJu5Tm8sUxJszR2tKyS7McQr/4NEb7Y9JHcJ6r8qqB9q +VvYgDxFUl4F1pyXOWWqCZe+36ufijXWLbvLdT6ZeYpzPEApk0E5tzivMW/VgpSdH +jWn0f84bcN5wGyDWbs2mAag8EtKpP6BrXruOIIt6keO1aO6g58QBdKhTCytKmg9l +Eg6CTY5j/e/rmxrbU6YTYK/CfdfHbBcl1HP7R2RQgYCUTOG/rksc35LtLgXfAgED +o1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUJ/PI +FR5umgIJFq0roIlgX9p7L6owEAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEF +BQADggEBAJ2dhISjQal8dwy3U8pORFBi71R803UXHOjgxkhLtv5MOhmBVrBW7hmW +Yqpao2TB9k5UM8Z3/sUcuuVdJcr18JOagxEu5sv4dEX+5wW4q+ffy0vhN4TauYuX +cB7w4ovXsNgOnbFp1iqRe6lJT37mjpXYgyc81WhJDtSd9i7rp77rMKSsH0T8lasz +Bvt9YAretIpjsJyp8qS5UwGH0GikJ3+r/+n6yUA4iGe0OcaEb1fJU9u6ju7AQ7L4 +CYNu/2bPPu8Xs1gYJQk0XuPL1hS27PKSb3TkL4Eq1ZKR4OCXPDJoBYVL0fdX4lId +kxpUnwVwwEpxYB5DC2Ae/qPOgRnhCzU= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw +PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz +cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9 +MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz +IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ +ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR +VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL +kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd +EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas +H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0 +HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud +DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4 +QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu +Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/ 
+AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8 +yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR +FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA +ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB +kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgIQKTZHquOKrIZKI1byyrdhrzANBgkqhkiG9w0BAQUFADBO +MQswCQYDVQQGEwJ1czEYMBYGA1UEChMPVS5TLiBHb3Zlcm5tZW50MQ0wCwYDVQQL +EwRGQkNBMRYwFAYDVQQDEw1Db21tb24gUG9saWN5MB4XDTA3MTAxNTE1NTgwMFoX +DTI3MTAxNTE2MDgwMFowTjELMAkGA1UEBhMCdXMxGDAWBgNVBAoTD1UuUy4gR292 +ZXJubWVudDENMAsGA1UECxMERkJDQTEWMBQGA1UEAxMNQ29tbW9uIFBvbGljeTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJeNvTMn5K1b+3i9L0dHbsd4 +6ZOcpN7JHP0vGzk4rEcXwH53KQA7Ax9oD81Npe53uCxiazH2+nIJfTApBnznfKM9 +hBiKHa4skqgf6F5PjY7rPxr4nApnnbBnTfAu0DDew5SwoM8uCjR/VAnTNr2kSVdS +c+md/uRIeUYbW40y5KVIZPMiDZKdCBW/YDyD90ciJSKtKXG3d+8XyaK2lF7IMJCk +FEhcVlcLQUwF1CpMP64Sm1kRdXAHImktLNMxzJJ+zM2kfpRHqpwJCPZLr1LoakCR +xVW9QLHIbVeGlRfmH3O+Ry4+i0wXubklHKVSFzYIWcBCvgortFZRPBtVyYyQd+sC +AwEAAaN7MHkwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFC9Yl9ipBZilVh/72at17wI8NjTHMBIGCSsGAQQBgjcVAQQFAgMBAAEwIwYJ +KwYBBAGCNxUCBBYEFHa3YJbdFFYprHWF03BjwbxHhhyLMA0GCSqGSIb3DQEBBQUA +A4IBAQBgrvNIFkBypgiIybxHLCRLXaCRc+1leJDwZ5B6pb8KrbYq+Zln34PFdx80 +CTj5fp5B4Ehg/uKqXYeI6oj9XEWyyWrafaStsU+/HA2fHprA1RRzOCuKeEBuMPdi +4c2Z/FFpZ2wR3bgQo2jeJqVW/TZsN5hs++58PGxrcD/3SDcJjwtCga1GRrgLgwb0 +Gzigf0/NC++DiYeXHIowZ9z9VKEDfgHLhUyxCynDvux84T8PCVI8L6eaSP436REG +WOE2QYrEtr+O3c5Ks7wawM36GpnScZv6z7zyxFSjiDV2zBssRm8MtNHDYXaSdBHq +S4CNHIkRi+xb/xfJSPzn4AYR4oRe +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw +MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 +Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ +DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW +/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g +PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY +SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv +IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 +zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd +BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB +ZQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT +IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw 
+MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy +ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDkzCCAnugAwIBAgIQFBOWgxRVjOp7Y+X8NId3RDANBgkqhkiG9w0BAQUFADA0 +MRMwEQYDVQQDEwpDb21TaWduIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQG +EwJJTDAeFw0wNDAzMjQxMTMyMThaFw0yOTAzMTkxNTAyMThaMDQxEzARBgNVBAMT +CkNvbVNpZ24gQ0ExEDAOBgNVBAoTB0NvbVNpZ24xCzAJBgNVBAYTAklMMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8ORUaSvTx49qROR+WCf4C9DklBKK +8Rs4OC8fMZwG1Cyn3gsqrhqg455qv588x26i+YtkbDqthVVRVKU4VbirgwTyP2Q2 +98CNQ0NqZtH3FyrV7zb6MBBC11PN+fozc0yz6YQgitZBJzXkOPqUm7h65HkfM/sb +2CEJKHxNGGleZIp6GZPKfuzzcuc3B1hZKKxC+cX/zT/npfo4sdAMx9lSGlPWgcxC +ejVb7Us6eva1jsz/D3zkYDaHL63woSV9/9JLEYhwVKZBqGdTUkJe5DSe5L6j7Kpi +Xd3DTKaCQeQzC6zJMw9kglcq/QytNuEMrkvF7zuZ2SOzW120V+x0cAwqTwIDAQAB +o4GgMIGdMAwGA1UdEwQFMAMBAf8wPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDovL2Zl 
+ZGlyLmNvbXNpZ24uY28uaWwvY3JsL0NvbVNpZ25DQS5jcmwwDgYDVR0PAQH/BAQD +AgGGMB8GA1UdIwQYMBaAFEsBmz5WGmU2dst7l6qSBe4y5ygxMB0GA1UdDgQWBBRL +AZs+VhplNnbLe5eqkgXuMucoMTANBgkqhkiG9w0BAQUFAAOCAQEA0Nmlfv4pYEWd +foPPbrxHbvUanlR2QnG0PFg/LUAlQvaBnPGJEMgOqnhPOAlXsDzACPw1jvFIUY0M +cXS6hMTXcpuEfDhOZAYnKuGntewImbQKDdSFc8gS4TXt8QUxHXOZDOuWyt3T5oWq +8Ir7dcHyCTxlZWTzTNity4hp8+SDtwy9F1qWF8pb/627HOkthIDYIb6FUtnUdLlp +hbpN7Sgy6/lhSuTENh4Z3G+EER+V9YMoGKgzkkMn3V0TBEVPh9VGzT2ouvDzuFYk +Res3x+F2T3I5GN9+dHLHcy056mDmrRGiVod7w2ia/viMcKjfZTL0pECMocJEAw6U +AGegcQCCSA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGATCCA+mgAwIBAgIRAI9hcRW6eVgXjH0ROqzW264wDQYJKoZIhvcNAQELBQAw +RTEfMB0GA1UEAxMWQ29tU2lnbiBHbG9iYWwgUm9vdCBDQTEVMBMGA1UEChMMQ29t +U2lnbiBMdGQuMQswCQYDVQQGEwJJTDAeFw0xMTA3MTgxMDI0NTRaFw0zNjA3MTYx +MDI0NTVaMEUxHzAdBgNVBAMTFkNvbVNpZ24gR2xvYmFsIFJvb3QgQ0ExFTATBgNV +BAoTDENvbVNpZ24gTHRkLjELMAkGA1UEBhMCSUwwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCyKClzKh3rm6n1nvigmV/VU1D4hSwYW2ro3VqpzpPo0Ph3 +3LguqjXd5juDwN4mpxTpD99d7Xu5X6KGTlMVtfN+bTbA4t3x7DU0Zqn0BE5XuOgs +3GLH41Vmr5wox1bShVpM+IsjcN4E/hMnDtt/Bkb5s33xCG+ohz5dlq0gA9qfr/g4 +O9lkHZXTCeYrmVzd/il4x79CqNvGkdL3um+OKYl8rg1dPtD8UsytMaDgBAopKR+W +igc16QJzCbvcinlETlrzP/Ny76BWPnAQgaYBULax/Q5thVU+N3sEOKp6uviTdD+X +O6i96gARU4H0xxPFI75PK/YdHrHjfjQevXl4J37FJfPMSHAbgPBhHC+qn/014DOx +46fEGXcdw2BFeIIIwbj2GH70VyJWmuk/xLMCHHpJ/nIF8w25BQtkPpkwESL6esaU +b1CyB4Vgjyf16/0nRiCAKAyC/DY/Yh+rDWtXK8c6QkXD2XamrVJo43DVNFqGZzbf +5bsUXqiVDOz71AxqqK+p4ek9374xPNMJ2rB5MLPAPycwI0bUuLHhLy6nAIFHLhut +TNI+6Y/soYpi5JSaEjcY7pxI8WIkUAzr2r+6UoT0vAdyOt7nt1y8844a7szo/aKf +woziHl2O1w6ZXUC30K+ptXVaOiW79pBDcbLZ9ZdbONhS7Ea3iH4HJNwktrBJLQID +AQABo4HrMIHoMA8GA1UdEwEB/wQFMAMBAf8wgYQGA1UdHwR9MHswPKA6oDiGNmh0 +dHA6Ly9mZWRpci5jb21zaWduLmNvLmlsL2NybC9jb21zaWduZ2xvYmFscm9vdGNh +LmNybDA7oDmgN4Y1aHR0cDovL2NybDEuY29tc2lnbi5jby5pbC9jcmwvY29tc2ln +bmdsb2JhbHJvb3RjYS5jcmwwDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBQCRZPY +DUhirGm6rgZbPvuqJpFQsTAfBgNVHSMEGDAWgBQCRZPYDUhirGm6rgZbPvuqJpFQ +sTANBgkqhkiG9w0BAQsFAAOCAgEAk1V5V9701xsfy4mfX+tP9Ln5e9h3N+QMwUfj +kr+k3e8iXOqADjTpUHeBkEee5tJq09ZLp/43F5tZ2eHdYq2ZEX7iWHCnOQet6Yw9 +SU1TahsrGDA6JJD9sdPFnNZooGsU1520e0zNB0dNWwxrWAmu4RsBxvEpWCJbvzQL +dOfyX85RWwli81OiVMBc5XvJ1mxsIIqli45oRynKtsWP7E+b0ISJ1n+XFLdQo/Nm +WA/5sDfT0F5YPzWdZymudMbXitimxC+n4oQE4mbQ4Zm718Iwg3pP9gMMcSc7Qc1J +kJHPH9O7gVubkKHuSYj9T3Ym6c6egL1pb4pz/uT7cT26Fiopc/jdqbe2EAfoJZkv +hlp/zdzOoXTWjiKNA5zmgWnZn943FuE9KMRyKtyi/ezJXCh8ypnqLIKxeFfZl69C +BwJsPXUTuqj8Fic0s3aZmmr7C4jXycP+Q8V+akMEIoHAxcd960b4wVWKqOcI/kZS +Q0cYqWOY1LNjznRt9lweWEfwDBL3FhrHOmD4++1N3FkkM4W+Q1b2WOL24clDMj+i +2n9Iw0lc1llHMSMvA5D0vpsXZpOgcCVahfXczQKi9wQ3oZyonJeWx4/rXdMtagAB +VBYGFuMEUEQtybI+eIbnp5peO2WAAblQI4eTy/jMVowe5tfMEXovV3sz9ULgmGb3 +DscLP1I= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAw +PDEbMBkGA1UEAxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWdu +MQswCQYDVQQGEwJJTDAeFw0wNDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwx +GzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBDQTEQMA4GA1UEChMHQ29tU2lnbjEL +MAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGtWhf +HZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs49oh +gHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sW +v+bznkqH7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ue +Mv5WJDmyVIRD9YTC2LxBkMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr +9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d19guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt +6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUwAwEB/zBEBgNVHR8EPTA7 
+MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29tU2lnblNl +Y3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58 +ADsAj8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkq +hkiG9w0BAQUFAAOCAQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7p +iL1DRYHjZiM/EoZNGeQFsOY3wo3aBijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtC +dsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtpFhpFfTMDZflScZAmlaxMDPWL +kz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP51qJThRv4zdL +hfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz +OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV +BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw +ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea +HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw +Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh +c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E +RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt +dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku +Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp +3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF 
+CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na +xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX +KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc +MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj +IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB +IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE +RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl +U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290 +IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU +ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC +QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr +rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S +NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc +QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH +txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP +BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC +AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp +tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa +IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl +6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+ +xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp 
+EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg +RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR +TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ 
+q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO +Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If 
+xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBb +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3Qx +ETAPBgNVBAsTCERTVCBBQ0VTMRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0w +MzExMjAyMTE5NThaFw0xNzExMjAyMTE5NThaMFsxCzAJBgNVBAYTAlVTMSAwHgYD +VQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UECxMIRFNUIEFDRVMx +FzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPu +ktKe1jzIDZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7 +gLFViYsx+tC3dr5BPTCapCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZH +fAjIgrrep4c9oW24MFbCswKBXy314powGCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4a +ahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPyMjwmR/onJALJfh1biEIT +ajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rk +c3QuY29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjto +dHRwOi8vd3d3LnRydXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMt +aW5kZXguaHRtbDAdBgNVHQ4EFgQUCXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZI +hvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V25FYrnJmQ6AgwbN99Pe7lv7Uk +QIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6tFr8hlxCBPeP/ +h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpR +rscL9yuwNwXsvFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf2 +9w4LTJxoeHtxMcfrHuBnQfO3oKfN5XozNmr6mis= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/ +MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT +DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow +PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD +Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O +rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq +OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b 
+xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw +7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD +aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG +SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69 +ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr +AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz +R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5 +JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo +Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIRANAeRlAAACmMAAAAAgAAAAIwDQYJKoZIhvcNAQEFBQAw +PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD +Ew5EU1QgUm9vdCBDQSBYNDAeFw0wMDA5MTMwNjIyNTBaFw0yMDA5MTMwNjIyNTBa +MD8xJDAiBgNVBAoTG0RpZ2l0YWwgU2lnbmF0dXJlIFRydXN0IENvLjEXMBUGA1UE +AxMORFNUIFJvb3QgQ0EgWDQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCthX3OFEYY8gSeIYur0O4ypOT68HnDrjLfIutL5PZHRwQGjzCPb9PFo/ihboJ8 +RvfGhBAqpQCo47zwYEhpWm1jB+L/OE/dBBiyn98krfU2NiBKSom2J58RBeAwHGEy +cO+lewyjVvbDDLUy4CheY059vfMjPAftCRXjqSZIolQb9FdPcAoa90mFwB7rKniE +J7vppdrUScSS0+eBrHSUPLdvwyn4RGp+lSwbWYcbg5EpSpE0GRJdchic0YDjvIoC +YHpe7Rkj93PYRTQyU4bhC88ck8tMqbvRYqMRqR+vobbkrj5LLCOQCHV5WEoxWh+0 +E2SpIFe7RkV++MmpIAc0h1tZAgMBAAGjMjAwMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFPCD6nPIP1ubWzdf9UyPWvf0hki9MA0GCSqGSIb3DQEBBQUAA4IBAQCE +G85wl5eEWd7adH6XW/ikGN5salvpq/Fix6yVTzE6CrhlP5LBdkf6kx1bSPL18M45 +g0rw2zA/MWOhJ3+S6U+BE0zPGCuu8YQaZibR7snm3HiHUaZNMu5c8D0x0bcMxDjY +AVVcHCoNiL53Q4PLW27nbY6wwG0ffFKmgV3blxrYWfuUDgGpyPwHwkfVFvz9qjaV +mf12VJffL6W8omBPtgteb6UaT/k1oJ7YI0ldGf+ngpVbRhD+LC3cUtT6GO/BEPZu +8YTV/hbiDH5v3khVqMIeKT6o8IuXGG7F6a6vKwP1F1FwTXf4UC/ivhme7vdUH7B/ +Vv4AEbT8dNfEeFxrkDbh +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV +BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC +aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV +BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1 +Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz +MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+ +BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp +em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY +B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH +D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF +Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo +q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D +k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH +fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut +dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM +ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8 +zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX +U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6 +Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5 +XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF +Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR +HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY 
+GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c +77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3 ++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK +vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6 +FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl +yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P +AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD +y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d +NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIE5zCCA8+gAwIBAgIBADANBgkqhkiG9w0BAQUFADCBjTELMAkGA1UEBhMCQ0Ex +EDAOBgNVBAgTB09udGFyaW8xEDAOBgNVBAcTB1Rvcm9udG8xHTAbBgNVBAoTFEVj +aG93b3J4IENvcnBvcmF0aW9uMR8wHQYDVQQLExZDZXJ0aWZpY2F0aW9uIFNlcnZp +Y2VzMRowGAYDVQQDExFFY2hvd29yeCBSb290IENBMjAeFw0wNTEwMDYxMDQ5MTNa +Fw0zMDEwMDcxMDQ5MTNaMIGNMQswCQYDVQQGEwJDQTEQMA4GA1UECBMHT250YXJp +bzEQMA4GA1UEBxMHVG9yb250bzEdMBsGA1UEChMURWNob3dvcnggQ29ycG9yYXRp +b24xHzAdBgNVBAsTFkNlcnRpZmljYXRpb24gU2VydmljZXMxGjAYBgNVBAMTEUVj +aG93b3J4IFJvb3QgQ0EyMIIBIDANBgkqhkiG9w0BAQEFAAOCAQ0AMIIBCAKCAQEA +utU/5BkV15UBf+s+JQruKQxr77s3rjp/RpOtmhHILIiO5gsEWP8MMrfrVEiidjI6 +Qh6ans0KAWc2Dw0/j4qKAQzOSyAZgjcdypNTBZ7muv212DA2Pu41rXqwMrlBrVi/ +KTghfdLlNRu6JrC5y8HarrnRFSKF1Thbzz921kLDRoCi+FVs5eVuK5LvIfkhNAqA +byrTgO3T9zfZgk8upmEkANPDL1+8y7dGPB/d6lk0I5mv8PESKX02TlvwgRSIiTHR +k8++iOPLBWlGp7ZfqTEXkPUZhgrQQvxcrwCUo6mk8TqgxCDP5FgPoHFiPLef5szP +ZLBJDWp7GLyE1PmkQI6WiwIBA6OCAVAwggFMMA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBQ74YEboKs/OyGC1eISrq5QqxSlEzCBugYDVR0j +BIGyMIGvgBQ74YEboKs/OyGC1eISrq5QqxSlE6GBk6SBkDCBjTELMAkGA1UEBhMC +Q0ExEDAOBgNVBAgTB09udGFyaW8xEDAOBgNVBAcTB1Rvcm9udG8xHTAbBgNVBAoT +FEVjaG93b3J4IENvcnBvcmF0aW9uMR8wHQYDVQQLExZDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzMRowGAYDVQQDExFFY2hvd29yeCBSb290IENBMoIBADBQBgNVHSAESTBH +MEUGCysGAQQB+REKAQMBMDYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuZWNob3dv +cnguY29tL2NhL3Jvb3QyL2Nwcy5wZGYwDQYJKoZIhvcNAQEFBQADggEBAG+nrPi/ +0RpfEzrj02C6JGPUar4nbjIhcY6N7DWNeqBoUulBSIH/PYGNHYx7/lnJefiixPGE +7TQ5xPgElxb9bK8zoAApO7U33OubqZ7M7DlHnFeCoOoIAZnG1kuwKwD5CXKB2a74 +HzcqNnFW0IsBFCYqrVh/rQgJOzDA8POGbH0DeD0xjwBBooAolkKT+7ZItJF1Pb56 +QpDL9G+16F7GkmnKlAIYT3QTS3yFGYChnJcd+6txUPhKi9sSOOmAIaKHnkH9Scz+ +A2cSi4A3wUYXVatuVNHpRb2lygfH3SuCX9MU8Ure3zBlSU1LALtMqI4JmcQmQpIq +zIzvO2jHyu9PQqo= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1 +MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1 +czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG +CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy +MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl +ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS +b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy +euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO +bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw +WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d +MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE +1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/ +zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB +BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF +BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV 
+v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG +E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW +iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v +GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG +A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 +d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu +dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq +RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy +MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD +VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g +Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi +A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt +ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH +Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC +R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX +hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 +cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs +IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz +dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy +NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu +dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt +dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 +aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T +RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN +cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW +wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 +U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 +jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN +BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ +jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v +1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R +nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH +VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV 
+BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEXDCCA0SgAwIBAgIEOGO5ZjANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0xOTEy +MjQxODIwNTFaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo3QwcjARBglghkgBhvhCAQEEBAMCAAcwHwYDVR0jBBgwFoAUVeSB0RGA +vtiJuQijMfmhJAkWuXAwHQYDVR0OBBYEFFXkgdERgL7YibkIozH5oSQJFrlwMB0G +CSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0BAQUFAAOCAQEA +WUesIYSKF8mciVMeuoCFGsY8Tj6xnLZ8xpJdGGQC49MGCBFhfGPjK50xA3B20qMo +oPS7mmNz7W3lKtvtFKkrxjYR0CvrB4ul2p5cGZ1WEvVUKcgF7bISKo30Axv/55IQ +h7A6tcOdBTcSo8f0FbnVpDkWm1M6I5HxqIKiaohowXkCIryqptau37AUX7iH0N18 +f3v/rxzP5tsHrV7bhZ3QKw0z2wTR5klAEyt2+z7pnIkPFc4YsIV4IU9rTw76NmfN +B/L/CNDi3tm/Kq+4h4YhPATKt5Rof8886ZjXOP/swNlQ8C5LWK5Gb9Auw2DaclVy +vUxFnmG6v4SBkgPR0ml8xQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3 +MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX 
+MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub +j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo +U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b +u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+ +bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er +fF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw +IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL +SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH +SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh +ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X +DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1 +TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ +fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA +sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU +WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS +nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH +dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip +NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC +AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF +MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB +uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl +PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP +JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/ +gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2 +j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6 +5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB +o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS +/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z +Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE +W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D +hNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEYDCCA0igAwIBAgICATAwDQYJKoZIhvcNAQELBQAwWTELMAkGA1UEBhMCVVMx +GDAWBgNVBAoTD1UuUy4gR292ZXJubWVudDENMAsGA1UECxMERlBLSTEhMB8GA1UE +AxMYRmVkZXJhbCBDb21tb24gUG9saWN5IENBMB4XDTEwMTIwMTE2NDUyN1oXDTMw +MTIwMTE2NDUyN1owWTELMAkGA1UEBhMCVVMxGDAWBgNVBAoTD1UuUy4gR292ZXJu +bWVudDENMAsGA1UECxMERlBLSTEhMB8GA1UEAxMYRmVkZXJhbCBDb21tb24gUG9s +aWN5IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2HX7NRY0WkG/ +Wq9cMAQUHK14RLXqJup1YcfNNnn4fNi9KVFmWSHjeavUeL6wLbCh1bI1FiPQzB6+ +Duir3MPJ1hLXp3JoGDG4FyKyPn66CG3G/dFYLGmgA/Aqo/Y/ISU937cyxY4nsyOl +4FKzXZbpsLjFxZ+7xaBugkC7xScFNknWJidpDDSPzyd6KgqjQV+NHQOGgxXgVcHF +mCye7Bpy3EjBPvmE0oSCwRvDdDa3ucc2Mnr4MrbQNq4iGDGMUHMhnv6DOzCIJOPp +wX7e7ZjHH5IQip9bYi+dpLzVhW86/clTpyBLqtsgqyFOHQ1O5piF5asRR12dP8Qj 
+wOMUBm7+nQIDAQABo4IBMDCCASwwDwYDVR0TAQH/BAUwAwEB/zCB6QYIKwYBBQUH +AQsEgdwwgdkwPwYIKwYBBQUHMAWGM2h0dHA6Ly9odHRwLmZwa2kuZ292L2ZjcGNh +L2NhQ2VydHNJc3N1ZWRCeWZjcGNhLnA3YzCBlQYIKwYBBQUHMAWGgYhsZGFwOi8v +bGRhcC5mcGtpLmdvdi9jbj1GZWRlcmFsJTIwQ29tbW9uJTIwUG9saWN5JTIwQ0Es +b3U9RlBLSSxvPVUuUy4lMjBHb3Zlcm5tZW50LGM9VVM/Y0FDZXJ0aWZpY2F0ZTti +aW5hcnksY3Jvc3NDZXJ0aWZpY2F0ZVBhaXI7YmluYXJ5MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUrQx6dVzl85jEeZgOrCj9l/TnAvwwDQYJKoZIhvcNAQELBQAD +ggEBAI9z2uF/gLGH9uwsz9GEYx728Yi3mvIRte9UrYpuGDco71wb5O9Qt2wmGCMi +TR0mRyDpCZzicGJxqxHPkYnos/UqoEfAFMtOQsHdDA4b8Idb7OV316rgVNdF9IU+ +7LQd3nyKf1tNnJaK0KIyn9psMQz4pO9+c+iR3Ah6cFqgr2KBWfgAdKLI3VTKQVZH +venAT+0g3eOlCd+uKML80cgX2BLHb94u6b2akfI8WpQukSKAiaGMWMyDeiYZdQKl +Dn0KJnNR6obLB6jI/WNaNZvSr79PMUjBhHDbNXuaGQ/lj/RqDG8z2esccKIN47lQ +A2EC/0rskqTcLe4qNJMHtyznGI8= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i +YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg +R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9 +9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq +fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv +iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU +1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+ +bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW +MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA +ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l +uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn +Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS +tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF +PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un +hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV +5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL +MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj +KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2 +MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw +NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV +BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH +MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL +So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal +tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG +CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT +qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz +rD6ogRLQy7rQkgu2npaqBA+K +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB +mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT +MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s +eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ +BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg 
+MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0 +BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz ++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm +hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn +5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W +JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL +DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC +huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw +HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB +AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB +zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN +kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH +SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G +spki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY +MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo +R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx +MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9 +AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA +ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0 +7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W +kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI +mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ +KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1 +6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl +4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K +oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj +UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU +AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx +MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy +cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG +A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl +BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed +KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7 +G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2 +zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4 +ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG +HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2 +Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V +yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e +beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r 
+6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog +zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW +BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr +ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp +ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk +cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt +YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC +CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow +KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI +hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ +UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz +X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x +fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz +a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd +Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd +SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O +AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso +M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge +v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEn +MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL +ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENo +YW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYxNDE4WhcNMzcwOTMwMTYxNDE4WjB9 +MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgy +NzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4G +A1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUA +A4IBDQAwggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0 +Mi+ITaFgCPS3CU6gSS9J1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/s +QJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8Oby4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpV +eAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl6DJWk0aJqCWKZQbua795 +B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c8lCrEqWh +z0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0T +AQH/BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1i +ZXJzaWduLm9yZy9jaGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4w +TcbOX60Qq+UDpfqpFDAOBgNVHQ8BAf8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAH +MCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBjaGFtYmVyc2lnbi5vcmcwKgYD +VR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9yZzBbBgNVHSAE +VDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0B +AQUFAAOCAQEAPDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUM +bKGKfKX0j//U2K0X1S0E0T9YgOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXi +ryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJPJ7oKXqJ1/6v/2j1pReQvayZzKWG +VwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4IBHNfTIzSJRUTN3c +ecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREest2d/ +AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG +A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv +b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw +MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i +YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT 
+aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ +jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp +xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp +1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG +snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ +U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 +9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B +AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz +yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE +38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP +AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad +DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME +HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ +FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F +uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX +kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs +ewv4n4Q= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc +8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 +MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL +v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 +eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq +tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd +C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa +zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB +mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH +V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n +bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG +3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs +J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO 
+291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS +ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd +AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh +MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE +YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 +MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo +ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg +MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN +ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA +PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w +wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi +EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY +avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ +YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE +sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h +/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 +IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy +OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P +TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER +dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf +ReYNnyicsbkqWletNw+vHX/bvZ8= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw 
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFSzCCAzOgAwIBAgIRALZLiAfiI+7IXBKtpg4GofIwDQYJKoZIhvcNAQELBQAw +PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTAeFw0xMjA5MjgwODU4NTFaFw0zNzEyMzExNTU5NTla +MD8xCzAJBgNVBAYTAlRXMTAwLgYDVQQKDCdHb3Zlcm5tZW50IFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQC2/5c8gb4BWCQnr44BK9ZykjAyG1+bfNTUf+ihYHMwVxAA+lCWJP5Q5ow6ldFX +eYTVZ1MMKoI+GFy4MCYa1l7GLbIEUQ7v3wxjR+vEEghRK5lxXtVpe+FdyXcdIOxW +juVhYC386RyA3/pqg7sFtR4jEpyCygrzFB0g5AaPQySZn7YKk1pzGxY5vgW28Yyl +ZJKPBeRcdvc5w88tvQ7Yy6gOMZvJRg9nU0MEj8iyyIOAX7ryD6uBNaIgIZfOD4k0 +eA/PH07p+4woPN405+2f0mb1xcoxeNLOUNFggmOd4Ez3B66DNJ1JSUPUfr0t4urH +cWWACOQ2nnlwCjyHKenkkpTqBpIpJ3jmrdc96QoLXvTg1oadLXLLi2RW5vSueKWg +OTNYPNyoj420ai39iHPplVBzBN8RiD5C1gJ0+yzEb7xs1uCAb9GGpTJXA9ZN9E4K +mSJ2fkpAgvjJ5E7LUy3Hsbbi08J1J265DnGyNPy/HE7CPfg26QrMWJqhGIZO4uGq +s3NZbl6dtMIIr69c/aQCb/+4DbvVq9dunxpPkUDwH0ZVbaCSw4nNt7H/HLPLo5wK +4/7NqrwB7N1UypHdTxOHpPaY7/1J1lcqPKZc9mA3v9g+fk5oKiMyOr5u5CI9ByTP +isubXVGzMNJxbc5Gim18SjNE2hIvNkvy6fFRCW3bapcOFwIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBTVZx3gnHosnMvFmOcdByYqhux0zTAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAJA75cJTQijq9TFOjj2Rnk0J +89ixUuZPrAwxIbvx6pnMg/y2KOTshAcOD06Xu29oRo8OURWV+Do7H1+CDgxxDryR +T64zLiNB9CZrTxOH+nj2LsIPkQWXqmrBap+8hJ4IKifd2ocXhuGzyl3tOKkpboTe +Rmv8JxlQpRJ6jH1i/NrnzLyfSa8GuCcn8on3Fj0Y5r3e9YwSkZ/jBI3+BxQaWqw5 +ghvxOBnhY+OvbLamURfr+kvriyL2l/4QOl+UoEtTcT9a4RD4co+WgN2NApgAYT2N +vC2xR8zaXeEgp4wxXPHj2rkKhkfIoT0Hozymc26Uke1uJDr5yTDRB6iBfSZ9fYTf +hsmL5a4NHr6JSFEVg5iWL0rrczTXdM3Jb9DCuiv2mv6Z3WAUjhv5nDk8f0OJU+jl +wqu+Iq0nOJt3KLejY2OngeepaUXrjnhWzAWEx/uttjB8YwWfLYwkf0uLkvw4Hp+g +pVezbp3YZLhwmmBScMip0P/GnO0QYV7Ngw5u6E0CQUridgR51lQ/ipgyFKDdLZzn +uoJxo4ZVKZnSKdt1OvfbQ/+2W/u3fjWAjg1srnm3Ni2XUqGwB5wH5Ss2zQOXlL0t +DjQG/MAWifw3VOTWzz0TBPKR2ck2Lj7FWtClTILD/y58Jnb38/1FoqVuVa4uzM8s +iTTa9g3nkagQ6hed8vbs +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix +RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p +YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw +NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK +EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl +cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz +dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ 
+fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns +bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD +75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP +FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV +HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp +5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu +b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA +A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p +6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7 +dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys +Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI +l7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx +FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg +Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG +A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr +b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ +jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn +PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh +ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9 +nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h +q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED +MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC +mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3 +7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB +oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs +EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO +fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi +AmvZWg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFHjCCBAagAwIBAgIEAKA3oDANBgkqhkiG9w0BAQsFADCBtzELMAkGA1UEBhMC +Q1oxOjA4BgNVBAMMMUkuQ0EgLSBRdWFsaWZpZWQgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHksIDA5LzIwMDkxLTArBgNVBAoMJFBydm7DrSBjZXJ0aWZpa2HEjW7DrSBh +dXRvcml0YSwgYS5zLjE9MDsGA1UECww0SS5DQSAtIEFjY3JlZGl0ZWQgUHJvdmlk +ZXIgb2YgQ2VydGlmaWNhdGlvbiBTZXJ2aWNlczAeFw0wOTA5MDEwMDAwMDBaFw0x +OTA5MDEwMDAwMDBaMIG3MQswCQYDVQQGEwJDWjE6MDgGA1UEAwwxSS5DQSAtIFF1 +YWxpZmllZCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSwgMDkvMjAwOTEtMCsGA1UE +CgwkUHJ2bsOtIGNlcnRpZmlrYcSNbsOtIGF1dG9yaXRhLCBhLnMuMT0wOwYDVQQL +DDRJLkNBIC0gQWNjcmVkaXRlZCBQcm92aWRlciBvZiBDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtTaEy0KC8M9l +4lSaWHMs4+sVV1LwzyJYiIQNeCrv1HHm/YpGIdY/Z640ceankjQvIX7m23BK4OSC +6KO8kZYA3zopOz6GFCOKV2PvLukbc+c2imF6kLHEv6qNA8WxhPbR3xKwlHDwB2yh +Wzo7V3QVgDRG83sugqQntKYC3LnlTGbJpNP+Az72gpO9AHUn/IBhFk4ksc8lYS2L +9GCy9CsmdKSBP78p9w8Lx7vDLqkDgt1/zBrcUWmSSb7AE/BPEeMryQV1IdI6nlGn +BhWkXOYf6GSdayJw86btuxC7viDKNrbp44HjQRaSxnp6O3eto1x4DfiYdw/YbJFe +7EjkxSQBywIDAQABo4IBLjCCASowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E +BAMCAQYwgecGA1UdIASB3zCB3DCB2QYEVR0gADCB0DCBzQYIKwYBBQUHAgIwgcAa +gb1UZW50byBjZXJ0aWZpa2F0IGplIHZ5ZGFuIGpha28ga3ZhbGlmaWtvdmFueSBz +eXN0ZW1vdnkgY2VydGlmaWthdCBwb2RsZSB6YWtvbmEgYy4gMjI3LzIwMDAgU2Iu +IHYgcGxhdG5lbSB6bmVuaS9UaGlzIGlzIHF1YWxpZmllZCBzeXN0ZW0gY2VydGlm +aWNhdGUgYWNjb3JkaW5nIHRvIEN6ZWNoIEFjdCBOby4gMjI3LzIwMDAgQ29sbC4w +HQYDVR0OBBYEFHnL0CPpOmdwkXRP01Hi4CD94Sj7MA0GCSqGSIb3DQEBCwUAA4IB 
+AQB9laU214hYaBHPZftbDS/2dIGLWdmdSbj1OZbJ8LIPBMxYjPoEMqzAR74tw96T +i6aWRa5WdOWaS6I/qibEKFZhJAVXX5mkx2ewGFLJ+0Go+eTxnjLOnhVF2V2s+57b +m8c8j6/bS6Ij6DspcHEYpfjjh64hE2r0aSpZDjGzKFM6YpqsCJN8qYe2X1qmGMLQ +wvNdjG+nPzCJOOuUEypIWt555ZDLXqS5F7ZjBjlfyDZjEfS2Es9Idok8alf563Mi +9/o+Ba46wMYOkk3P1IlU0RqCajdbliioACKDztAqubONU1guZVzV8tuMASVzbJeL +/GAB7ECTwe1RuKrLYtglMKI9 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw +MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp +S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK +W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG +4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN +MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0 +MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7 +ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy +RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS +bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF +/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R +3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw +EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy +9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V +GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ +2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV +WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD +W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN 
+AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV +DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9 +TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G +lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW +mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df +WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5 ++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ +tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA +GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv +8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc +oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEXzCCA0egAwIBAgIBATANBgkqhkiG9w0BAQUFADCB0DELMAkGA1UEBhMCRVMx +SDBGBgNVBAoTP0laRU5QRSBTLkEuIC0gQ0lGIEEtMDEzMzcyNjAtUk1lcmMuVml0 +b3JpYS1HYXN0ZWl6IFQxMDU1IEY2MiBTODFCMEAGA1UEBxM5QXZkYSBkZWwgTWVk +aXRlcnJhbmVvIEV0b3JiaWRlYSAzIC0gMDEwMTAgVml0b3JpYS1HYXN0ZWl6MRMw +EQYDVQQDEwpJemVucGUuY29tMR4wHAYJKoZIhvcNAQkBFg9JbmZvQGl6ZW5wZS5j +b20wHhcNMDMwMTMwMjMwMDAwWhcNMTgwMTMwMjMwMDAwWjCB0DELMAkGA1UEBhMC +RVMxSDBGBgNVBAoTP0laRU5QRSBTLkEuIC0gQ0lGIEEtMDEzMzcyNjAtUk1lcmMu +Vml0b3JpYS1HYXN0ZWl6IFQxMDU1IEY2MiBTODFCMEAGA1UEBxM5QXZkYSBkZWwg +TWVkaXRlcnJhbmVvIEV0b3JiaWRlYSAzIC0gMDEwMTAgVml0b3JpYS1HYXN0ZWl6 +MRMwEQYDVQQDEwpJemVucGUuY29tMR4wHAYJKoZIhvcNAQkBFg9JbmZvQGl6ZW5w +ZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1btoCXXhp3xIW +D+Bxl8nUCxkyiazWfpt0e68t+Qt9+lZjKZSdEw2Omj4qvr+ovRmDXO3iWpWVOWDl 
+3JHJjAzFCe8ZEBNDH+QNYwZHmPBaMYFOYFdbAFVHWvys152C308hcFJ6xWWGmjvl +2eMiEl9P2nR2LWue368DCu+ak7j3gjAXaCOdP1a7Bfr+RW3X2SC5R4Xyp8iHlL5J +PHJD/WBkLrezwzQPdACw8m9EG7q9kUwlNpL32mROujS3ZkT6mQTzJieLiE3X04s0 +uIUqVkk5MhjcHFf7al0N5CzjtTcnXYJKN2Z9EDVskk4olAdGi46eSoZXbjUOP5gk +Ej6wVZAXAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBTqVk/sPIOhFIh4gbIrBSLAB0FbQjANBgkqhkiG9w0BAQUFAAOC +AQEAYp7mEzzhw6o5Hf5+T5kcI+t4BJyiIWy7vHlLs/G8dLYXO81aN/Mzg928eMTR +TxxYZL8dd9uwsJ50TVfX6L0R4Dyw6wikh3fHRrat9ufXi63j5K91Ysr7aXqnF38d +iAgHYkrwC3kuxHBb9C0KBz6h8Q45/KCyN7d37wWAq38yyhPDlaOvyoE6bdUuK5hT +m5EYA5JmPyrhQ1moDOyueWBAjxzMEMj+OAY1H90cLv6wszsqerxRrdTOHBdv7MjB +EIpvEEQkXUxVXAzFuuT6m2t91Lfnwfl/IvljHaVC7DlyyhRYHD6D4Rx+4QKp4tWL +vpw6LkI+gKNJ/YdMCsRZQzEEFA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF8DCCA9igAwIBAgIPBuhGJy8fCo/RhFzjafbVMA0GCSqGSIb3DQEBBQUAMDgx +CzAJBgNVBAYTAkVTMRQwEgYDVQQKDAtJWkVOUEUgUy5BLjETMBEGA1UEAwwKSXpl +bnBlLmNvbTAeFw0wNzEyMTMxMzA4MjdaFw0zNzEyMTMwODI3MjVaMDgxCzAJBgNV +BAYTAkVTMRQwEgYDVQQKDAtJWkVOUEUgUy5BLjETMBEGA1UEAwwKSXplbnBlLmNv +bTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMnTesoPHqynhugWZWqx +whtFMnGV2f4QW8yv56V5AY+Jw8ryVXH3d753lPNypCxE2J6SmxQ6oeckkAoKVo7F +2CaU4dlI4S0+2gpy3aOZFdqBoof0e24md4lYrdbrDLJBenNubdt6eEHpCIgSfocu +ZhFjbFT7PJ1ywLwu/8K33Q124zrX97RovqL144FuwUZvXY3gTcZUVYkaMzEKsVe5 +o4qYw+w7NMWVQWl+dcI8IMVhulFHoCCQk6GQS/NOfIVFVJrRBSZBsLVNHTO+xAPI +JXzBcNs79AktVCdIrC/hxKw+yMuSTFM5NyPs0wH54AlETU1kwOENWocivK0bo/4m +tRXzp/yEGensoYi0RGmEg/OJ0XQGqcwL1sLeJ4VQJsoXuMl6h1YsGgEebL4TrRCs +tST1OJGh1kva8bvS3ke18byB9llrzxlT6Y0Vy0rLqW9E5RtBz+GGp8rQap+8TI0G +M1qiheWQNaBiXBZO8OOi+gMatCxxs1gs3nsL2xoP694hHwZ3BgOwye+Z/MC5TwuG +KP7Suerj2qXDR2kS4Nvw9hmL7Xtw1wLW7YcYKCwEJEx35EiKGsY7mtQPyvp10gFA +Wo15v4vPS8+qFsGV5K1Mij4XkdSxYuWC5YAEpAN+jb/af6IPl08M0w3719Hlcn4c +yHf/W5oPt64FRuXxqBbsR6QXAgMBAAGjgfYwgfMwgbAGA1UdEQSBqDCBpYEPaW5m +b0BpemVucGUuY29tpIGRMIGOMUcwRQYDVQQKDD5JWkVOUEUgUy5BLiAtIENJRiBB +MDEzMzcyNjAtUk1lcmMuVml0b3JpYS1HYXN0ZWl6IFQxMDU1IEY2MiBTODFDMEEG +A1UECQw6QXZkYSBkZWwgTWVkaXRlcnJhbmVvIEV0b3JiaWRlYSAxNCAtIDAxMDEw +IFZpdG9yaWEtR2FzdGVpejAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUHRxlDqjyJXu0kc/ksbHmvVV0bAUwDQYJKoZIhvcNAQEFBQAD +ggIBAMeBRm8hGE+gBe/n1bqXUKJg7aWSFBpSm/nxiEqg3Hh10dUflU7F57dp5iL0 ++CmoKom+z892j+Mxc50m0xwbRxYpB2iEitL7sRskPtKYGCwkjq/2e+pEFhsqxPqg +l+nqbFik73WrAGLRne0TNtsiC7bw0fRue0aHwp28vb5CO7dz0JoqPLRbEhYArxk5 +ja2DUBzIgU+9Ag89njWW7u/kwgN8KRwCfr00J16vU9adF79XbOnQgxCvv11N75B7 +XSus7Op9ACYXzAJcY9cZGKfsK8eKPlgOiofmg59OsjQerFQJTx0CCzl+gQgVuaBp +E8gyK+OtbBPWg50jLbJtooiGfqgNASYJQNntKE6MkyQP2/EeTXp6WuKlWPHcj1+Z +ggwuz7LdmMySlD/5CbOlliVbN/UShUHiGUzGigjB3Bh6Dx4/glmimj4/+eAJn/3B +kUtdyXvWton83x18hqrNA/ILUpLxYm9/h+qrdslsUMIZgq+qHfUgKGgu1fxkN0/P +pUTEvnK0jHS0bKf68r10OEMr3q/53NjgnZ/cPcqlY0S/kqJPTIAcuxrDmkoEVU3K +7iYLHL8CxWTTnn7S05EcS6L1HOUXHA0MUqORH5zwIe0ClG+poEnK6EOMxPQ02nwi +o8ZmPrgbBYhdurz3vOXcFD2nhqi2WVIhA16L4wTtSyoeo09Q +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 +MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 +ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD +VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j +b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq +scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO +xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H +LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX 
+uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD +yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ +JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q +rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN +BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L +hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB +QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ +HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu +Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg +QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB +BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA +A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb +laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 +awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo +JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw +LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT +VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk +LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb +UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ +QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ +naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls +QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDczCCAlugAwIBAgIBBDANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJLUjEN +MAsGA1UECgwES0lTQTEuMCwGA1UECwwlS29yZWEgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkgQ2VudHJhbDEWMBQGA1UEAwwNS0lTQSBSb290Q0EgMTAeFw0wNTA4MjQw +ODA1NDZaFw0yNTA4MjQwODA1NDZaMGQxCzAJBgNVBAYTAktSMQ0wCwYDVQQKDARL +SVNBMS4wLAYDVQQLDCVLb3JlYSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBDZW50 +cmFsMRYwFAYDVQQDDA1LSVNBIFJvb3RDQSAxMIIBIDANBgkqhkiG9w0BAQEFAAOC +AQ0AMIIBCAKCAQEAvATk+hM58DSWIGtsaLv623f/J/es7C/n/fB/bW+MKs0lCVsk +9KFo/CjsySXirO3eyDOE9bClCTqnsUdIxcxPjHmc+QZXfd3uOPbPFLKc6tPAXXdi +8EcNuRpAU1xkcK8IWsD3z3X5bI1kKB4g/rcbGdNaZoNy4rCbvdMlFQ0yb2Q3lIVG +yHK+d9VuHygvx2nt54OJM1jT3qC/QOhDUO7cTWu8peqmyGGO9cNkrwYV3CmLP3WM +vHFE2/yttRcdbYmDz8Yzvb9Fov4Kn6MRXw+5H5wawkbMnChmn3AmPC7fqoD+jMUE +CSVPzZNHPDfqAmeS/vwiJFys0izgXAEzisEZ2wIBA6MyMDAwHQYDVR0OBBYEFL+2 +J9gDWnZlTGEBQVYx5Yt7OtnMMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEF +BQADggEBABOvUQveimpb5poKyLGQSk6hAp3MiNKrZr097LuxQpVqslxa/6FjZJap +aBV/JV6K+KRzwYCKhQoOUugy50X4TmWAkZl0Q+VFnUkq8JSV3enhMNITbslOsXfl +BM+tWh6UCVrXPAgcrnrpFDLBRa3SJkhyrKhB2vAhhzle3/xk/2F0KpzZm4tfwjeT +2KM3LzuTa7IbB6d/CVDv0zq+IWuKkDsnSlFOa56ch534eJAx7REnxqhZvvwYC/uO +fi5C4e3nCSG9uRPFVmf0JqZCQ5BEVLRxm3bkGhKsGigA35vB1fjbXKP4krG9tNT5 +UNkAAk/bg9ART6RCVmE6fhMy04Qfybo= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 +ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G +CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y +OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx +FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp +Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP +kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc 
+cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U +fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 +N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC +xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 ++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM +Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG +SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h +mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk +ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c +2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t +HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG +EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3 +MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR +dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB +pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM +b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm +aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz +IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT +lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz +AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5 +VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG +ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2 +BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG +AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M +U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh +bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C ++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F +uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2 +XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi +MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp +dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV +UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO +ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz +c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP +OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl +mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF +BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 +qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw +gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu +bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp +dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 +6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ 
+h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH +/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN +pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB +ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly +aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl +ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w +NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G +A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD +VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX +SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR +VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2 +w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF +mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg +4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9 +4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw +EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx +SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2 +ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8 +vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi +Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ +/L7fCg0= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r +M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 +MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV 
+wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe +rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 +68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh +4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp +UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o +abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc +3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G +KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt +hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO +Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt +zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD +ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 +cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN +qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 +YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv +b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 +8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k +NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj +ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp +q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt +nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- 
+MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa +GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg +Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J +WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB +rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp ++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 +ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i +Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz +PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og +/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH +oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI +yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud +EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 +A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL +MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f +BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn +g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl +fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K +WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha +B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc +hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR +TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD +mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z +ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y +4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza +8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 +MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR +/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu +FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR +U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c +ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR +FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k +A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw +eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl +sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp +VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q +A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD +ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI +FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv +oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg 
+u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl +8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM +V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB +4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr +H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd +8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv +vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT +mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe +btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc +T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt +WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ +c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A +4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD +VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG +CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 +aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu +dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw +czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G +A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg +Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 +7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem +d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd ++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B +4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN +t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x +DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 +k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s +zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j +Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT +mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK +4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz +MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw +IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR +dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp +li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D +rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ 
+WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug +F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU +xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC +Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv +dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw +ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl +IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh +c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy +ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI +KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T +KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq +y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p +dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD +VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL +MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk +fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8 +7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R +cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y +mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW +xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK +SnQ2+Q== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx +MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg +Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ +iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa +/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ +jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI +HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7 +sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w +gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw +KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG +AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L +URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO +H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm +I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY +iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz +MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv +cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz +Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO +0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao +wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj +7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS +8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT 
+BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg +JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 +6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ +3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm +D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS +CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMh +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIz +MloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09N +IFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNlY3VyaXR5IENvbW11 +bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSE +RMqm4miO/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gO +zXppFodEtZDkBp2uoQSXWHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5 +bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4zZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDF +MxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4bepJz11sS6/vmsJWXMY1 +VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK9U2vP9eC +OKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G +CSqGSIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HW +tWS3irO4G8za+6xmiEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZ +q51ihPZRwSzJIxXYKLerJRO1RuGGAv8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDb +EJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnWmHyojf6GPgcWkuF75x3sM3Z+ +Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEWT1MKZPlO9L9O +VL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY +MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t +dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5 +WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD +VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8 +9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ +DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9 +Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N +QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ +xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G +A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG +kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr +Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5 +Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU +JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot +RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX +DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy +dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj 
+YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV +OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr +zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM +VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ +hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO +ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw +awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs +OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF +coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc +okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8 +t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy +1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/ +SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP +MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx +MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV +BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o +Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt +5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s +3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej +vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu +8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw +DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG +MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil +zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/ +3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD +FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6 +Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2 +ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y +MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg +TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS +b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS +M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC +UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d +Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p +rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l +pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb +j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC +KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS +/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X +cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH +1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP +px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7 +MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u +2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS 
+v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC +wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy +CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e +vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6 +Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa +Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL +eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8 +FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc +7uzXLg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX +DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291 +qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp +uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU +Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE +pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp +5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M +UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN +GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy +5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv +6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK +eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6 +B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/ +BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov +L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG +SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS +CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen +5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897 +IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK +gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL ++63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL +vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm +bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk +N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC +Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z +ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX +DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP +cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW +IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX +xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy +KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR +9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az +5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8 +6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7 
+Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP +bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt +BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt +XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd +INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD +U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp +LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8 +Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp +gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh +/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw +0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A +fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq +4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR +1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/ +QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM +94B7IWcnMFk= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO 
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW +MBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1 +OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG +A1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ +JZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD +vfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo +D/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/ +Q0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW +RST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK +HDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN +nw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM +0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i +UUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9 +Ha90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg +TuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL +BQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX +UfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl +6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK +9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ +HgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI 
+wpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY +XzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l +IxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo +hdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr +so8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW +MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg +Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9 +MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi +U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh +cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk +pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf +OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C +Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT +Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi +HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM +Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w ++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ +Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 +Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B +26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID +AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j +ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js +LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM +BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy +dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh +cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh +YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg +dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp +bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ +YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT +TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ +9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8 +jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW +FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz +ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1 +ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L +EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu +L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC +O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V +um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh +NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW +MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg +Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9 +MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi 
+U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh +cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk +pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf +OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C +Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT +Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi +HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM +Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w ++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ +Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 +Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B +26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID +AQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul +F2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC +ATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w +ZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk +aWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0 +YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg +c2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93 +d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG +CWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF +wWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS +Ta0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst +0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc +pRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl +CcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF +P0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK +1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm +KhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ +8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm +fyWl8kgAwKQB2j8= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBk +MQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0 +YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3Qg +Q0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4MTgyMjA2MjBaMGQxCzAJBgNVBAYT +AmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGlnaXRhbCBDZXJ0aWZp +Y2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9 +m2BtRsiMMW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdih +FvkcxC7mlSpnzNApbjyFNDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/ +TilftKaNXXsLmREDA/7n29uj/x2lzZAeAR81sH8A25Bvxn570e56eqeqDFdvpG3F +EzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkCb6dJtDZd0KTeByy2dbco +kdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn7uHbHaBu +HYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNF +vJbNcA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo +19AOeCMgkckkKmUpWyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjC +L3UcPX7ape8eYIVpQtPM+GP+HkM5haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJW +bjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNYMUJDLXT5xp6mig/p/r+D5kNX 
+JLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0hBBYw +FDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzc +K6FptWfUjNP9MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzf +ky9NfEBWMXrrpA9gzXrzvsMnjgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7Ik +Vh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQMbFamIp1TpBcahQq4FJHgmDmHtqB +sfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4HVtA4oJVwIHaM190e +3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtlvrsR +ls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ip +mXeascClOS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HH +b6D0jqTsNFFbjCYDcKF31QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksf +rK/7DZBaZmBwXarNeNQk7shBoJMBkpxqnvy5JMWzFYJ+vq6VK+uxwNrjAWALXmms +hFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCyx/yP2FS1k2Kdzs9Z+z0Y +zirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMWNY6E0F/6 +MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQHp4o6Ejy5e/DfEoeWhhntjANBgkqhkiG9w0BAQsFADBk +MQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0 +YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3Qg +Q0EgMjAeFw0xMTA2MjQwODM4MTRaFw0zMTA2MjUwNzM4MTRaMGQxCzAJBgNVBAYT +AmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGlnaXRhbCBDZXJ0aWZp +Y2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAyMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlUJOhJ1R5tMJ6HJaI2nbeHCOFvEr +jw0DzpPMLgAIe6szjPTpQOYXTKueuEcUMncy3SgM3hhLX3af+Dk7/E6J2HzFZ++r +0rk0X2s682Q2zsKwzxNoysjL67XiPS4h3+os1OD5cJZM/2pYmLcX5BtS5X4HAB1f +2uY+lQS3aYg5oUFgJWFLlTloYhyxCwWJwDaCFCE/rtuh/bxvHGCGtlOUSbkrRsVP +ACu/obvLP+DHVxxX6NZp+MEkUp2IVd3Chy50I9AU/SpHWrumnf2U5NGKpV+GY3aF +y6//SSj8gO1MedK75MDvAe5QQQg1I3ArqRa0jG6F6bYRzzHdUyYb3y1aSgJA/MTA +tukxGggo5WDDH8SQjhBiYEQN7Aq+VRhxLKX0srwVYv8c474d2h5Xszx+zYIdkeNL +6yxSNLCK/RJOlrDrcH+eOfdmQrGrrFLadkBXeyq96G4DsguAhYidDMfCd7Camlf0 +uPoTXGiTOmekl9AbmbeGMktg2M7v0Ax/lZ9vh0+Hio5fCHyqW/xavqGRn1V9TrAL +acywlKinh/LTSlDcX3KwFnUey7QYYpqwpzmqm59m2I2mbJYV4+by+PGDYmy7Velh +k6M99bFXi08jsJvllGov34zflVEpYKELKeRcVVi3qPyZ7iVNTA6z00yPhOgpD/0Q +VAKFyPnlw4vP5w8CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0hBBYw +FDASBgdghXQBUwIBBgdghXQBUwIBMBIGA1UdEwEB/wQIMAYBAf8CAQcwHQYDVR0O +BBYEFE0mICKJS9PVpAqhb97iEoHF8TwuMB8GA1UdIwQYMBaAFE0mICKJS9PVpAqh +b97iEoHF8TwuMA0GCSqGSIb3DQEBCwUAA4ICAQAyCrKkG8t9voJXiblqf/P0wS4R +fbgZPnm3qKhyN2abGu2sEzsOv2LwnN+ee6FTSA5BesogpxcbtnjsQJHzQq0Qw1zv +/2BZf82Fo4s9SBwlAjxnffUy6S8w5X2lejjQ82YqZh6NM4OKb3xuqFp1mrjX2lhI +REeoTPpMSQpKwhI3qEAMw8jh0FcNlzKVxzqfl9NX+Ave5XLzo9v/tdhZsnPdTSpx +srpJ9csc1fV5yJmz/MFMdOO0vSk3FQQoHt5FRnDsr7p4DooqzgB53MBfGWcsa0vv +aGgLQ+OswWIJ76bdZWGgr4RVSJFSHMYlkSrQwSIjYVmvRRGFHQEkNI/Ps/8XciAT +woCqISxxOQ7Qj1zB09GOInJGTB2Wrk9xseEFKZZZ9LuedT3PDTcNYtsmjGOpI99n +Bjx8Oto0QuFmtEYE3saWmA9LSHokMnWRn6z3aOkquVVlzl1h0ydw2Df+n7mvoC5W +t6NlUe07qxS/TFED6F+KBZvuim6c779o+sjaC+NCydAXFJy3SuCvkychVSa1ZC+N +8f+mQAWFBVzKBxlcCxMoTFh/wqXvRdpg065lYZ1Tg3TCrvJcwhbtkj6EPnNgiLx2 +9CzP0H1907he0ZESEOnN3col49XtmS++dYFLJPlFRpTJKSFTnCZFqhMX5OfNeOI5 +wSsSnqaeG8XmDtkx2Q== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF4DCCA8igAwIBAgIRAPL6ZOJ0Y9ON/RAdBB92ylgwDQYJKoZIhvcNAQELBQAw +ZzELMAkGA1UEBhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdp +dGFsIENlcnRpZmljYXRlIFNlcnZpY2VzMR4wHAYDVQQDExVTd2lzc2NvbSBSb290 +IEVWIENBIDIwHhcNMTEwNjI0MDk0NTA4WhcNMzEwNjI1MDg0NTA4WjBnMQswCQYD +VQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2Vy +dGlmaWNhdGUgU2VydmljZXMxHjAcBgNVBAMTFVN3aXNzY29tIFJvb3QgRVYgQ0Eg 
+MjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMT3HS9X6lds93BdY7Bx +UglgRCgzo3pOCvrY6myLURYaVa5UJsTMRQdBTxB5f3HSek4/OE6zAMaVylvNwSqD +1ycfMQ4jFrclyxy0uYAyXhqdk/HoPGAsp15XGVhRXrwsVgu42O+LgrQ8uMIkqBPH +oCE2G3pXKSinLr9xJZDzRINpUKTk4RtiGZQJo/PDvO/0vezbE53PnUgJUmfANykR +HvvSEaeFGHR55E+FFOtSN+KxRdjMDUN/rhPSays/p8LiqG12W0OfvrSdsyaGOx9/ +5fLoZigWJdBLlzin5M8J0TbDC77aO0RYjb7xnglrPvMyxyuHxuxenPaHZa0zKcQv +idm5y8kDnftslFGXEBuGCxobP/YCfnvUxVFkKJ3106yDgYjTdLRZncHrYTNaRdHL +OdAGalNgHa/2+2m8atwBz735j9m9W8E6X47aD0upm50qKGsaCnw8qyIL5XctcfaC +NYGu+HuB5ur+rPQam3Rc6I8k9l2dRsQs0h4rIWqDJ2dVSqTjyDKXZpBy2uPUZC5f +46Fq9mDU5zXNysRojddxyNMkM3OxbPlq4SjbX8Y96L5V5jcb7STZDxmPX2MYWFCB +UWVv8p9+agTnNCRxunZLWB4ZvRVgRaoMEkABnRDixzgHcgplwLa7JSnaFp6LNYth +7eVxV4O1PHGf40+/fh6Bn0GXAgMBAAGjgYYwgYMwDgYDVR0PAQH/BAQDAgGGMB0G +A1UdIQQWMBQwEgYHYIV0AVMCAgYHYIV0AVMCAjASBgNVHRMBAf8ECDAGAQH/AgED +MB0GA1UdDgQWBBRF2aWBbj2ITY1x0kbBbkUe88SAnTAfBgNVHSMEGDAWgBRF2aWB +bj2ITY1x0kbBbkUe88SAnTANBgkqhkiG9w0BAQsFAAOCAgEAlDpzBp9SSzBc1P6x +XCX5145v9Ydkn+0UjrgEjihLj6p7jjm02Vj2e6E1CqGdivdj5eu9OYLU43otb98T +PLr+flaYC/NUn81ETm484T4VvwYmneTwkLbUwp4wLh/vx3rEUMfqe9pQy3omywC0 +Wqu1kx+AiYQElY2NfwmTv9SoqORjbdlk5LgpWgi/UOGED1V7XwgiG/W9mR4U9s70 +WBCCswo9GcG/W6uqmdjyMb3lOGbcWAXH7WMaLgqXfIeTK7KK4/HsGOV1timH59yL +Gn602MnTihdsfSlEvoqq9X46Lmgxk7lq2prg2+kupYTNHAq4Sgj5nPFhJpiTt3tm +7JFe3VE/23MPrQRYCd0EApUKPtN236YQHoA96M2kZNEzx5LH4k5E4wnJTsJdhw4S +nr8PyQUQ3nqjsTzyP6WqJ3mtMX0f/fwZacXduT98zca0wjAefm6S139hdlqP65VN +vBFuIXxZN5nQBrz5Bm0yFqXZaajh3DyAHmBR3NdUIR7KYndP+tiPsys6DXhyyWhB +WkdKwqPrGtcKqzwyVcgKEZzfdNbwQBUdyLmPtTbFr/giuMod89a2GQ+fYWVq6nTI +fI/DT11lgh/ZDYnadXL77/FHZxOzyNEZiCcmmpl5fx7kLD977vHeTYuWl8PVP3wb +I+2ksx0WckNLIOFZfsLorSa/ovc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln +biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF +MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT +d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 +76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ +bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c +6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE +emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd +MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt +MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y +MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y +FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi +aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM +gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB +qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7 +lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn +8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6 +45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO +UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5 +O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC +bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv +GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a +77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC +hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3 
+92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp +Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w +ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt +Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFejCCA2KgAwIBAgIJAN7E8kTzHab8MA0GCSqGSIb3DQEBCwUAMEoxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxJDAiBgNVBAMTG1N3aXNzU2ln +biBHb2xkIFJvb3QgQ0EgLSBHMzAeFw0wOTA4MDQxMzMxNDdaFw0zNzA4MDQxMzMx +NDdaMEoxCzAJBgNVBAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxJDAiBgNV +BAMTG1N3aXNzU2lnbiBHb2xkIFJvb3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAMPon8hlWp1nG8FFl7S0h0NbYWCAnvJ/XvlnRN1E+qu1 +q3f/KhlMzm/Ej0Gf4OLNcuDR1FJhQQkKvwpw++CDaWEpytsimlul5t0XlbBvhI46 +PmRaQfsbWPz9Kz6ypOasyYK8zvaV+Jd37Sb2WK6eJ+IPg+zFNljIe8/Vh6GphxoT +Z2EBbaZpnOKQ8StoZfPosHz8gj3erdgKAAlEeROc8P5udXvCvLNZAQt8xdUt8L// +bVfSSYHrtLNQrFv5CxUVjGn/ozkB7fzc3CeXjnuL1Wqm1uAdX80Bkeb1Ipi6LgkY +OG8TqIHS+yE35y20YueBkLDGeVm3Z3X+vo87+jbsr63ST3Q2AeVXqyMEzEpel89+ +xu+MzJUjaY3LOMcZ9taKABQeND1v2gwLw7qX/BFLUmE+vzNnUxC/eBsJwke6Hq9Y +9XWBf71W8etW19lpDAfpNzGwEhwy71bZvnorfL3TPbxqM006PFAQhyfHegpnU9t/ +gJvoniP6+Qg6i6GONFpIM19k05eGBxl9iJTOKnzFat+vvKmfzTqmurtU+X+P388O +WsStmryzOndzg0yTPJBotXxQlRHIgl6UcdBBGPvJxmXszom2ziKzEVs/4J0+Gxho +DaoDoWdZv2udvPjyZS+aQTpF2F7QNmxvOx5jtI6YTBPbIQ6fe+3qoKpxw+ujoNIl +AgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBRclwZGNKvfMMV8xQ1VcWYwtWCPnjAfBgNVHSMEGDAWgBRclwZGNKvfMMV8 +xQ1VcWYwtWCPnjANBgkqhkiG9w0BAQsFAAOCAgEAd0tN3uqFSqssJ9ZFx/FfIMFb +YO0Hy6Iz3DbPx5TxBsfV2s/NrYQ+/xJIf0HopWZXMMQd5KcaLy1Cwe9Gc7LV9Vr9 +Dnpr0sgxow1IlldlY1UYwPzkisyYhlurDIonN/ojaFlcJtehwcK5Tiz/KV7mlAu+ +zXJPleiP9ve4Pl7Oz54RyawDKUiKqbamNLmsQP/EtnM3scd/qVHbSypHX0AkB4gG +tySz+3/3sIsz+r8jdaNc/qplGsK+8X2BdwOBsY3XlQ16PEKYt4+pfVDh31IGmqBS +VHiDB2FSCTdeipynxlHRXGPRhNzC29L6Wxg2fWa81CiXL3WWHIQHrIuOUxG+JCGq +Z/LBrYic07B4Z3j101gDIApdIPG152XMDiDj1d/mLxkrhWjBBCbPj+0FU6HdBw7r +QSbHtKksW+NpPWbAYhvAqobAN8MxBIZwOb5rXyFAQaB/5dkPOEtwX0n4hbgrLqof +k0FD+PuydDwfS1dbt9RRoZJKzr4Qou7YFCJ7uUG9jemIqdGPAxpg/z+HiaCZJyJm +sD5onnKIUTidEz5FbQXlRrVz7UOGsRQKHrzaDb8eJFxmjw6+of3G62m8Q3nXA3b5 +3IeZuJjEzX9tEPkQvixC/pwpTYNrCr21jsRIiv0hB6aAfR+b6au9gmFECnEnX22b +kJ6u/zYks2gD1pWMa3M= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFwTCCA6mgAwIBAgIITrIAZwwDXU8wDQYJKoZIhvcNAQEFBQAwSTELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEjMCEGA1UEAxMaU3dpc3NTaWdu +IFBsYXRpbnVtIENBIC0gRzIwHhcNMDYxMDI1MDgzNjAwWhcNMzYxMDI1MDgzNjAw +WjBJMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMSMwIQYDVQQD +ExpTd2lzc1NpZ24gUGxhdGludW0gQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAMrfogLi2vj8Bxax3mCq3pZcZB/HL37PZ/pEQtZ2Y5Wu669y +IIpFR4ZieIbWIDkm9K6j/SPnpZy1IiEZtzeTIsBQnIJ71NUERFzLtMKfkr4k2Htn +IuJpX+UFeNSH2XFwMyVTtIc7KZAoNppVRDBopIOXfw0enHb/FZ1glwCNioUD7IC+ +6ixuEFGSzH7VozPY1kneWCqv9hbrS3uQMpe5up1Y8fhXSQQeol0GcN1x2/ndi5ob +jM89o03Oy3z2u5yg+gnOI2Ky6Q0f4nIoj5+saCB9bzuohTEJfwvH6GXp43gOCWcw +izSC+13gzJ2BbWLuCB4ELE6b7P6pT1/9aXjvCR+htL/68++QHkwFix7qepF6w9fl ++zC8bBsQWJj3Gl/QKTIDE0ZNYWqFTFJ0LwYfexHihJfGmfNtf9dng34TaNhxKFrY +zt3oEBSa/m0jh26OWnA81Y0JAKeqvLAxN23IhBQeW71FYyBrS3SMvds6DsHPWhaP +pZjydomyExI7C3d3rLvlPClKknLKYRorXkzig3R3+jVIeoVNjZpTxN94ypeRSCtF +KwH3HBqi7Ri6Cr2D+m+8jVeTO9TUps4e8aCxzqv9KyiaTxvXw3LbpMS/XUz13XuW +ae5ogObnmLo2t/5u7Su9IPhlGdpVCX4l3P5hYnL5fhgC72O00Puv5TtjjGePAgMB +AAGjgawwgakwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFFCvzAeHFUdvOMW0ZdHelarp35zMMB8GA1UdIwQYMBaAFFCvzAeHFUdvOMW0 +ZdHelarp35zMMEYGA1UdIAQ/MD0wOwYJYIV0AVkBAQEBMC4wLAYIKwYBBQUHAgEW 
+IGh0dHA6Ly9yZXBvc2l0b3J5LnN3aXNzc2lnbi5jb20vMA0GCSqGSIb3DQEBBQUA +A4ICAQAIhab1Fgz8RBrBY+D5VUYI/HAcQiiWjrfFwUF1TglxeeVtlspLpYhg0DB0 +uMoI3LQwnkAHFmtllXcBrqS3NQuB2nEVqXQXOHtYyvkv+8Bldo1bAbl93oI9ZLi+ +FHSjClTTLJUYFzX1UWs/j6KWYTl4a0vlpqD4U99REJNi54Av4tHgvI42Rncz7Lj7 +jposiU0xEQ8mngS7twSNC/K5/FqdOxa3L8iYq/6KUFkuozv8KV2LwUvJ4ooTHbG/ +u0IdUt1O2BReEMYxB+9xJ/cbOQncguqLs5WGXv312l0xpuAxtpTmREl0xRbl9x8D +YSjFyMsSoEJL+WuICI20MhjzdZ/EfwBPBZWcoxcCw7NTm6ogOSkrZvqdr16zktK1 +puEa+S1BaYEUtLS17Yk9zvupnTVCRLEcFHOBzyoBNZox1S2PbYTfgE1X4z/FhHXa +icYwu+uPyyIIoK6q8QNsOktNCaUOcsZWayFCTiMlFGiudgp8DAdwZPmaL/YFOSbG +DI8Zf0NebvRbFS/bYV3mZy8/CJT5YLSYMdp08YSTcU1f+2BY0fvEwW2JorsgH51x +kcsymxM9Pn2SUjWskpSi0xjCfMfqr3YFFt1nJ8J+HAciIfNAChs0B0QTwoRqjt8Z +Wr9/6x3iGjjRXK9HkmuAtTClyY3YqzGBH9/CZjfTk6mFhnll0g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFgTCCA2mgAwIBAgIIIj+pFyDegZQwDQYJKoZIhvcNAQELBQAwTjELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEoMCYGA1UEAxMfU3dpc3NTaWdu +IFBsYXRpbnVtIFJvb3QgQ0EgLSBHMzAeFw0wOTA4MDQxMzM0MDRaFw0zNzA4MDQx +MzM0MDRaME4xCzAJBgNVBAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxKDAm +BgNVBAMTH1N3aXNzU2lnbiBQbGF0aW51bSBSb290IENBIC0gRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCUoO8TG59EIBvNxaoiu9nyUj56Wlh35o2h +K8ncpPPksxOUAGKbHPJDUEOBfq8wNkmsGIkMGEW4PsdUbePYmllriholqba1Dbd9 +I/BffagHqfc+hi7IAU3c5jbtHeU3B2kSS+OD0QQcJPAfcHHnGe1zSG6VKxW2VuYC +31bpm/rqpu7gwsO64MzGyHvXbzqVmzqPvlss0qmgOD7WiOGxYhOO3KswZ82oaqZj +K4Kwy8c9Tu1y9n2rMk5lAusPmXT4HBoojA5FAJMsFJ9txxue9orce3jjtJRHHU0F +bYR6kFSynot1woDfhzk/n/tIVAeNoCn1+WBfWnLou5ugQuAIADSjFTwT49YaawKy +lCGjnUG8KmtOMzumlDj8PccrM7MuKwZ0rJsQb8VORfddoVYDLA1fer0e3h13kGva +pS2KTOnfQfTnS+x9lUKfTKkJD0OIPz2T5yv0ekjaaMTdEoAxGl0kVCamJCGzTK3a +Fwg2AlfGnIZwyXXJnnxh2HjmuegUafkcECgSXUt1ULo80GdwVVVWS/s9HNjbeU2X +37ie2xcs1TUHuFCp9473Vv96Z0NPINnKZtY4YEvulDHWDaJIm/80aZTGNfWWiO+q +ZsyBputMU/8ydKe2nZhXtLomqfEzM2J+OrADEVf/3G8RI60+xgrQzFS3LcKTHeXC +pozH2O9T9wIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUVio/kFj0F1oUstcIG4VbVGpUGigwHwYDVR0jBBgwFoAUVio/ +kFj0F1oUstcIG4VbVGpUGigwDQYJKoZIhvcNAQELBQADggIBAGztiudDqHknm7jP +hz5kOBiMEUKShjfgWMMb7gQu94TsgxBoDH94LZzCl442ThbYDuprSK1Pnl0NzA2p +PhiFfsxomTk11tifhsEy+01lsyIUS8iFZtoX/3GRrJxWV95xLFZCv/jNDvCi0//S +IhX70HgKfuGwWs6ON9upnueVz2PyLA3S+m/zyNX7ALf3NWcQ03tS7BAy+L/dXsmm +gqTxsL8dLt0l5L1N8DWpkQFH+BAClFvrPusNutUdYyylLqvn4x6j7kuqX7FmAbSC +WvlGS8fx+N8svv113ZY4mjc6bqXmMhVus5DAOYp0pZWgvg0uiXnNKVaOw15XUcQF +bwRVj4HpTL1ZRssqvE3JHfLGTwXkyAQN925P2sM6nNLC9enGJHoUPhxCMKgCRTGp +/FCp3NyGOA9bkz9/CE5qDSc6EHlWwxW4PgaG9tlwZ691eoviWMzGdU8yVcVsFAko +O/KV5GreLCgHraB9Byjd1Fqj6aZ8E4yZC1J429nR3z5aQ3Z/RmBTws3ndkd8Vc20 +OWQQW5VLNV1EgyTV4C4kDMGAbmkAgAZ3CmaCEAxRbzeJV9vzTOW4ue4jZpdgt1Ld +2Zb7uoo7oE3OXvBETJDMIU8bOphrjjGD+YMIUssZwTVr7qEVW4g/bazyNJJTpjAq +E9fmhqhd2ULSx52peovL3+6iMcLl +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu +IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow +RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY +U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv +Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br +YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF +nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH +6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt 
+eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/ +c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ +MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH +HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf +jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6 +5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB +rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU +F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c +wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB +AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp +WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9 +xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ +2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ +IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8 +aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X +em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR +dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/ +OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+ +hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy +tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFfjCCA2agAwIBAgIJAKqIsFoLsXabMA0GCSqGSIb3DQEBCwUAMEwxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxJjAkBgNVBAMTHVN3aXNzU2ln +biBTaWx2ZXIgUm9vdCBDQSAtIEczMB4XDTA5MDgwNDEzMTkxNFoXDTM3MDgwNDEz +MTkxNFowTDELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEmMCQG +A1UEAxMdU3dpc3NTaWduIFNpbHZlciBSb290IENBIC0gRzMwggIiMA0GCSqGSIb3 +DQEBAQUAA4ICDwAwggIKAoICAQC+h5sF5nF8Um9t7Dep6bPczF9/01DqIZsE8D2/ +vo7JpRQWMhDPmfzscK1INmckDBcy1inlSjmxN+umeAxsbxnKTvdR2hro+iE4bJWc +L9aLzDsCm78mmxFFtrg0Wh2mVEhSyJ14cc5ISsyneIPcaKtmHncH0zYYCNfUbWD4 +8HnTMzYJkmO3BJr1p5baRa90GvyC46hbDjo/UleYfrycjMHAslrfxH7+DKZUdoN+ +ut3nKvRKNk+HZS6lujmNWWEp89OOJHCMU5sRpUcHsnUFXA2E2UTZzckmRFduAn2V +AdSrJIbuPXD7V/qwKRTQnfLFl8sJyvHyPefYS5bpiC+eR1GKVGWYSNIS5FR3DAfm +vluc8d0Dfo2E/L7JYtX8yTroibVfwgVSYfCcPuwuTYxykY7IQ8GiKF71gCTc4i+H +O1MA5cvwsnyNeRmgiM14+MWKWnflBqzdSt7mcG6+r771sasOCLDboD+Uxb4Subx7 +J3m1MildrsUgI5IDe1Q5sIkiVG0S48N46jpA/aSTrOktiDzbpkdmTN/YF+0W3hrW +10Fmvx2A8aTgZBEpXgwnBWLr5cQEYtHEnwxqVdZYOJxmD537q1SAmZzsSdaCn9pF +1j9TBgO3/R/shn104KS06DK2qgcj+O8kQZ5jMHj0VN2O8Fo4jhJ/eMdvAlYhM864 +uK1pVQIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAd +BgNVHQ4EFgQUoYxFkwoSYwunV18ySn3hIee3PmYwHwYDVR0jBBgwFoAUoYxFkwoS +YwunV18ySn3hIee3PmYwDQYJKoZIhvcNAQELBQADggIBAIeuYW1IOCrGHNxKLoR4 +ScAjKkW4NU3RBfq5BTPEZL3brVQWKrA+DVoo2qYagHMMxEFvr7g0tnfUW44dC4tG +kES1s+5JGInBSzSzhzV0op5FZ+1FcWa2uaElc9fCrIj70h2na9rAWubYWWQ0l2Ug +MTMDT86tCZ6u6cI+GHW0MyUSuwXsULpxQOK93ohGBSGEi6MrHuswMIm/EfVcRPiR +i0tZRQswDcoMT29jvgT+we3gh/7IzVa/5dyOetTWKU6A26ubP45lByL3RM2WHy3H +9Qm2mHD/ONxQFRGEO3+p8NgkVMgXjCsTSdaZf0XRD46/aXI3Uwf05q79Wz55uQbN +uIF4tE2g0DW65K7/00m8Ne1jxrP846thWgW2C+T/qSq+31ROwktcaNqjMqLJTVcY +UzRZPGaZ1zwCeKdMcdC/2/HEPOcB5gTyRPZIJjAzybEBGesC8cwh+joCMBedyF+A +P90lrAKb4xfevcqSFNJSgVPm6vwwZzKpYvaTFxUHMV4PG2n19Km3fC2z7YREMkco +BzuGaUWpxzaWkHJ02BKmcyPRTrm2ejrEKaFQBhG52fQmbmIIEiAW8AFXF9QFNmeX +61H5/zMkDAUPVr/vPRxSjoreaQ9aH/DVAzFEs5LG6nWorrvHYAOImP/HBIRSkIbh +tJOpUC/o69I2rDBgp9ADE7UK +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICqDCCAi2gAwIBAgIQIW4zpcvTiKRvKQe0JzzE2DAKBggqhkjOPQQDAzCBlDEL +MAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYD 
+VQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBD +bGFzcyAxIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0g +RzQwHhcNMTExMDA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBlDELMAkGA1UEBhMC +VVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZTeW1h +bnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAxIFB1 +YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAATXZrUb266zYO5G6ohjdTsqlG3zXxL24w+etgoUU0hS +yNw6s8tIICYSTvqJhNTfkeQpfSgB2dsYQ2mhH7XThhbcx39nI9/fMTGDAzVwsUu3 +yBe7UcvclBfb6gk7dhLeqrWjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRlwI0l9Qy6l3eQP54u4Fr1ztXh5DAKBggqhkjOPQQD +AwNpADBmAjEApa7jRlP4mDbjIvouKEkN7jB+M/PsP3FezFWJeJmssv3cHFwzjim5 +axfIEWi13IMHAjEAnMhE2mnCNsNUGRCFAtqdR+9B52wmnQk9922Q0QVEL7C8g5No +8gxFSTm/mQQc0xCg +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID9jCCAt6gAwIBAgIQJDJ18h0v0gkz97RqytDzmDANBgkqhkiG9w0BAQsFADCB +lDELMAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8w +HQYDVQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRl +YyBDbGFzcyAxIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzYwHhcNMTExMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UE +BhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZT +eW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAx +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzYwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHOddJZKmZgiJM6kXZBxbje/SD +6Jlz+muxNuCad6BAwoGNAcfMjL2Pffd543pMA03Z+/2HOCgs3ZqLVAjbZ/sbjP4o +ki++t7JIp4Gh2F6Iw8w5QEFa0dzl2hCfL9oBTf0uRnz5LicKaTfukaMbasxEvxvH +w9QRslBglwm9LiL1QYRmn81ApqkAgMEflZKf3vNI79sdd2H8f9/ulqRy0LY+/3gn +r8uSFWkI22MQ4uaXrG7crPaizh5HmbmJtxLmodTNWRFnw2+F2EJOKL5ZVVkElauP +N4C/DfD8HzpkMViBeNfiNfYgPym4jxZuPkjctUwH4fIa6n4KedaovetdhitNAgMB +AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBQzQejIORIVk0jyljIuWvXalF9TYDANBgkqhkiG9w0BAQsFAAOCAQEAFeNzV7EX +tl9JaUSm9l56Z6zS3nVJq/4lVcc6yUQVEG6/MWvL2QeTfxyFYwDjMhLgzMv7OWyP +4lPiPEAz2aSMR+atWPuJr+PehilWNCxFuBL6RIluLRQlKCQBZdbqUqwFblYSCT3Q +dPTXvQbKqDqNVkL6jXI+dPEDct+HG14OelWWLDi3mIXNTTNEyZSPWjEwN0ujOhKz +5zbRIWhLLTjmU64cJVYIVgNnhJ3Gw84kYsdMNs+wBkS39V8C3dlU6S+QTnrIToNA +DJqXPDe/v+z28LSFdyjBC8hnghAXOKK3Buqbvzr46SMHv3TgmDgVVXjucgBcGaP0 +0jPg/73RVDkpDw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICqDCCAi2gAwIBAgIQNBdlEkA7t1aALYDLeVWmHjAKBggqhkjOPQQDAzCBlDEL +MAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYD +VQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBD +bGFzcyAyIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0g +RzQwHhcNMTExMDA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBlDELMAkGA1UEBhMC +VVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZTeW1h +bnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAyIFB1 +YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAATR2UqOTA2ESlG6fO/TzPo6mrWnYxM9AeBJPvrBR8mS +szrX/m+c95o6D/UOCgrDP8jnEhSO1dVtmCyzcTIK6yq99tdqIAtnRZzSsr9TImYJ +XdsR8/EFM1ij4rjPfM2Cm72jQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBQ9MvM6qQyQhPmijGkGYVQvh3L+BTAKBggqhkjOPQQD +AwNpADBmAjEAyKapr0F/tckRQhZoaUxcuCcYtpjxwH+QbYfTjEYX8D5P/OqwCMR6 +S7wIL8fip29lAjEA1lnehs5fDspU1cbQFQ78i5Ry1I4AWFPPfrFLDeVQhuuea9// +KabYR9mglhjb8kWz +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID9jCCAt6gAwIBAgIQZIKe/DcedF38l/+XyLH/QTANBgkqhkiG9w0BAQsFADCB +lDELMAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8w +HQYDVQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRl 
+YyBDbGFzcyAyIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzYwHhcNMTExMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UE +BhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZT +eW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAy +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzYwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNzOkFyGOFyz9AYxe9GPo15gRn +V2WYKaRPyVyPDzTS+NqoE2KquB5QZ3iwFkygOakVeq7t0qLA8JA3KRgmXOgNPLZs +ST/B4NzZS7YUGQum05bh1gnjGSYc+R9lS/kaQxwAg9bQqkmi1NvmYji6UBRDbfkx ++FYW2TgCkc/rbN27OU6Z4TBnRfHU8I3D3/7yOAchfQBeVkSz5GC9kSucq1sEcg+y +KNlyqwUgQiWpWwNqIBDMMfAr2jUs0Pual07wgksr2F82owstr2MNHSV/oW5cYqGN +KD6h/Bwg+AEvulWaEbAZ0shQeWsOagXXqgQ2sqPy4V93p3ec5R7c6d9qwWVdAgMB +AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBSHjCCVyJhK0daABkqQNETfHE2/sDANBgkqhkiG9w0BAQsFAAOCAQEAgY6ypWaW +tyGltu9vI1pf24HFQqV4wWn99DzX+VxrcHIa/FqXTQCAiIiCisNxDY7FiZss7Y0L +0nJU9X3UXENX6fOupQIR9nYrgVfdfdp0MP1UR/bgFm6mtApI5ud1Bw8pGTnOefS2 +bMVfmdUfS/rfbSw8DVSAcPCIC4DPxmiiuB1w2XaM/O6lyc+tHc+ZJVdaYkXLFmu9 +Sc2lo4xpeSWuuExsi0BmSxY/zwIa3eFsawdhanYVKZl/G92IgMG/tY9zxaaWI4Sm +KIYkM2oBLldzJbZev4/mHWGoQClnHYebHX+bn5nNMdZUvmK7OaxoEkiRIKXLsd3+ +b/xa5IJVWa8xqQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICpzCCAi2gAwIBAgIQTHm1miicdjFk9YlE0JEC3jAKBggqhkjOPQQDAzCBlDEL +MAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYD +VQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBD +bGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0g +RzQwHhcNMTIxMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UEBhMC +VVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZTeW1h +bnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAzIFB1 +YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAARXz+qzOU0/oSHgbi84csaHl/OFC0fnD1HI0fSZm8pZ +Zf9M+eoLtyXV0vbsMS0yYhLXdoan+jjJZdT+c+KEOfhMSWIT3brViKBfPchPsD+P +oVAR5JNGrcNfy/GkapVW6MCjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBQknbzScfcdwiW+IvGJpSwVOzQeXjAKBggqhkjOPQQD +AwNoADBlAjEAuWZoZdsF0Dh9DvPIdWG40CjEsUozUVj78jwQyK5HeHbKZiQXhj5Q +Vm6lLZmIuL0kAjAD6qfnqDzqnWLGX1TamPR3vU+PGJyRXEdrQE0QHbPhicoLIsga +xcX+i93B3294n5E= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF9jCCA96gAwIBAgIQZWNxhdNvRcaPfzH5CYeSgjANBgkqhkiG9w0BAQwFADCB +lDELMAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8w +HQYDVQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRl +YyBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzYwHhcNMTIxMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UE +BhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZT +eW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzYwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC3DrL6TbyachX7d1vb/UMPywv3 +YC6zK34Mu1PyzE5l8xm7/zUd99Opu0Attd141Kb5N+qFBXttt+YTSwZ8+3ZjjyAd +LTgrBIXy6LDRX01KIclq2JTqHgJQpqqQB6BHIepm+QSg5oPwxPVeluInTWHDs8GM +IrZmoQDRVin77cF/JMo9+lqUsITDx7pDHP1kDvEo+0dZ8ibhMblE+avd+76+LDfj +rAsY0/wBovGkCjWCR0yrvYpe3xOF/CDMSFmvr0FvyyPNypOn3dVfyGQ7/wEDoApP +LW49hL6vyDKyUymQFfewBZoKPPa5BpDJpeFdoDuw/qi2v/WJKFckOiGGceTciotB +VeweMCRZ0cBZuHivqlp03iWAMJjtMERvIXAc2xJTDtamKGaTLB/MTzwbgcW59nhv +0DI6CHLbaw5GF4WU87zvvPekXo7p6bVk5bdLRRIsTDe3YEMKTXEGAJQmNXQfu3o5 +XE475rgD4seTi4QsJUlF3X8jlGAfy+nN9quX92Hn+39igcjcCjBcGHzmzu/Hbh6H +fLPpysh7avRo/IOlDFa0urKNSgrHl5fFiDAVPRAIVBVycmczM/R8t84AJ1NlziTx +WmTnNi/yLgLCl99y6AIeoPc9tftoYAP6M6nmEm0G4amoXU48/tnnAGWsthlNe4N/ 
+NEfq4RhtsYsceavnnQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQUOXEIAD7eyIbnkP/k/SEPziQZFvYwDQYJKoZIhvcN
+AQEMBQADggIBAFBriE1gSM5a4yLOZ3yEp80c/ekMA4w2rwqHDmquV64B0Da78v25
+c8FftaiuTKL6ScsHRhY2vePIVzh+OOS/JTNgxtw3nGO7XpgeGrKC8K6mdxGAREeh
+KcXwszrOmPC47NMOgAZ3IzBM/3lkYyJbd5NDS3Wz2ztuO0rd8ciutTeKlYg6EGhw
+OLlbcH7VQ8n8X0/l5ns27vAg7UdXEyYQXhQGDXt2B8LGLRb0rqdsD7yID08sAraj
+1yLmmUc12I2lT4ESOhF9s8wLdfMecKMbA+r6mujmLjY5zJnOOj8Mt674Q5mwk25v
+qtkPajGRu5zTtCj7g0x6c4JQZ9IOrO1gxbJdNZjPh34eWR0kvFa62qRa2MzmvB4Q
+jxuMjvPB27e+1LBbZY8WaPNWxSoZFk0PuGWHbSSDuGLc4EdhGoh7zk5//dzGDVqa
+pPO1TPbdMaboHREhMzAEYX0c4D5PjT+1ixIAWn2poQDUg+twuxj4pNIcgS23CBHI
+Jnu21OUPA0Zy1CVAHr5JXW2T8VyyO3VUaTqg7kwiuqya4gitRWMFSlI1dsQ09V4H
+Mq3cfCbRW4+t5OaqG3Wf61206MCpFXxOSgdy30bJ1JGSdVaw4e43NmUoxRXIK3bM
+bW8Zg/T92hXiQeczeUaDV/nxpbZt07zXU+fucW14qZen7iCcGRVyFT0E
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDcTCCAlmgAwIBAgIVAOYJ/nrqAGiM4CS07SAbH+9StETRMA0GCSqGSIb3DQEB
+BQUAMFAxCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9LcmFqb3dhIEl6YmEgUm96bGlj
+emVuaW93YSBTLkEuMRcwFQYDVQQDDA5TWkFGSVIgUk9PVCBDQTAeFw0xMTEyMDYx
+MTEwNTdaFw0zMTEyMDYxMTEwNTdaMFAxCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L
+cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRcwFQYDVQQDDA5TWkFGSVIg
+Uk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKxHL49ZMTml
+6g3wpYwrvQKkvc0Kc6oJ5sxfgmp1qZfluwbv88BdocHSiXlY8NzrVYzuWBp7J/9K
+ULMAoWoTIzOQ6C9TNm4YbA9A1jdX1wYNL5Akylf8W5L/I4BXhT9KnlI6x+a7BVAm
+nr/Ttl+utT/Asms2fRfEsF2vZPMxH4UFqOAhFjxTkmJWf2Cu4nvRQJHcttB+cEAo
+ag/hERt/+tzo4URz6x6r19toYmxx4FjjBkUhWQw1X21re//Hof2+0YgiwYT84zLb
+eqDqCOMOXxvH480yGDkh/QoazWX3U75HQExT/iJlwnu7I1V6HXztKIwCBjsxffbH
+3jOshCJtywcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFFOSo33/gnbwM9TrkmdHYTMbaDsqMA0GCSqGSIb3DQEBBQUA
+A4IBAQA5UFWd5EL/pBviIMm1zD2JLUCpp0mJG7JkwznIOzawhGmFFaxGoxAhQBEg
+haP+E0KR66oAwVC6xe32QUVSHfWqWndzbODzLB8yj7WAR0cDM45ZngSBPBuFE3Wu
+GLJX9g100ETfIX+4YBR/4NR/uvTnpnd9ete7Whl0ZfY94yuu4xQqB5QFv+P7IXXV
+lTOjkjuGXEcyQAjQzbFaT9vIABSbeCXWBbjvOXukJy6WgAiclzGNSYprre8Ryydd
+fmjW9HIGwsIO03EldivvqEYL1Hv1w/Pur+6FUEOaL68PEIUovfgwIB2BAw+vZDuw
+cH0mX548PojGyg434cDjkSXa3mHF
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd
+AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC
+FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi
+1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq
+jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ
+wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/
+WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy
+NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC
+uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw
+IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6
+g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN
+9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP
+BSeOE6Fuwg==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN
+8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/
+RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4
+hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5
+ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM
+EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1
+A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy
+WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ
+1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30
+6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT
+91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml
+e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p
+TpPDpFQUWw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw
+NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv
+b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD
+VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F
+VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1
+7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X
+Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+
+/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs
+81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm
+dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe
+Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu
+sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4
+pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs
+slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ
+arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD
+VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG
+9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl
+dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx
+0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj
+TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed
+Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7
+Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI
+OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7
+vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW
+t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn
+HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx
+SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp
+IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi
+BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw +MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig +YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v +dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/ +BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6 +papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K +DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3 +KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox +XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB +rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV +BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa +Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl +LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u +MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl +ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm +gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8 +YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf +b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9 +9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S +zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk +OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV +HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA +2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW +oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c +KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM +m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu +MdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB +qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV +BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw +NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j +LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG +A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs +W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta +3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk +6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6 +Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J +NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP +r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU +DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz +YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX 
+xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2 +/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/ +LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7 +jVaMaA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGHDCCBASgAwIBAgIES45gAzANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJE +SzESMBAGA1UEChMJVFJVU1QyNDA4MSIwIAYDVQQDExlUUlVTVDI0MDggT0NFUyBQ +cmltYXJ5IENBMB4XDTEwMDMwMzEyNDEzNFoXDTM3MTIwMzEzMTEzNFowRTELMAkG +A1UEBhMCREsxEjAQBgNVBAoTCVRSVVNUMjQwODEiMCAGA1UEAxMZVFJVU1QyNDA4 +IE9DRVMgUHJpbWFyeSBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +AJlJodr3U1Fa+v8HnyACHV81/wLevLS0KUk58VIABl6Wfs3LLNoj5soVAZv4LBi5 +gs7E8CZ9w0F2CopW8vzM8i5HLKE4eedPdnaFqHiBZ0q5aaaQArW+qKJx1rT/AaXt +alMB63/yvJcYlXS2lpexk5H/zDBUXeEQyvfmK+slAySWT6wKxIPDwVapauFY9QaG ++VBhCa5jBstWS7A5gQfEvYqn6csZ3jW472kW6OFNz6ftBcTwufomGJBMkonf4ZLr +6t0AdRi9jflBPz3MNNRGxyjIuAmFqGocYFA/OODBRjvSHB2DygqQ8k+9tlpvzMRr +kU7jq3RKL+83G1dJ3/LTjCLz4ryEMIC/OJ/gNZfE0qXddpPtzflIPtUFVffXdbFV +1t6XZFhJ+wBHQCpJobq/BjqLWUA86upsDbfwnePtmIPRCemeXkY0qabC+2Qmd2Fe +xyZphwTyMnbqy6FG1tB65dYf3mOqStmLa3RcHn9+2dwNfUkh0tjO2FXD7drWcU0O +I9DW8oAypiPhm/QCjMU6j6t+0pzqJ/S0tdAo+BeiXK5hwk6aR+sRb608QfBbRAs3 +U/q8jSPByenggac2BtTN6cl+AA1Mfcgl8iXWNFVGegzd/VS9vINClJCe3FNVoUnR +YCKkj+x0fqxvBLopOkJkmuZw/yhgMxljUi2qYYGn90OzAgMBAAGjggESMIIBDjAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjARBgNVHSAECjAIMAYGBFUd +IAAwgZcGA1UdHwSBjzCBjDAsoCqgKIYmaHR0cDovL2NybC5vY2VzLnRydXN0MjQw +OC5jb20vb2Nlcy5jcmwwXKBaoFikVjBUMQswCQYDVQQGEwJESzESMBAGA1UEChMJ +VFJVU1QyNDA4MSIwIAYDVQQDExlUUlVTVDI0MDggT0NFUyBQcmltYXJ5IENBMQ0w +CwYDVQQDEwRDUkwxMB8GA1UdIwQYMBaAFPZt+LFIs0FDAduGROUYBbdezAY3MB0G +A1UdDgQWBBT2bfixSLNBQwHbhkTlGAW3XswGNzANBgkqhkiG9w0BAQsFAAOCAgEA +VPAQGrT7dIjD3/sIbQW86f9CBPu0c7JKN6oUoRUtKqgJ2KCdcB5ANhCoyznHpu3m +/dUfVUI5hc31CaPgZyY37hch1q4/c9INcELGZVE/FWfehkH+acpdNr7j8UoRZlkN +15b/0UUBfGeiiJG/ugo4llfoPrp8bUmXEGggK3wyqIPcJatPtHwlb6ympfC2b/Ld +v/0IdIOzIOm+A89Q0utx+1cOBq72OHy8gpGb6MfncVFMoL2fjP652Ypgtr8qN9Ka +/XOazktiIf+2Pzp7hLi92hRc9QMYexrV/nnFSQoWdU8TqULFUoZ3zTEC3F/g2yj+ +FhbrgXHGo5/A4O74X+lpbY2XV47aSuw+DzcPt/EhMj2of7SA55WSgbjPMbmNX0rb +oenSIte2HRFW5Tr2W+qqkc/StixgkKdyzGLoFx/xeTWdJkZKwyjqge2wJqws2upY +EiThhC497+/mTiSuXd69eVUwKyqYp9SD2rTtNmF6TCghRM/dNsJOl+osxDVGcwvt +WIVFF/Onlu5fu1NHXdqNEfzldKDUvCfii3L2iATTZyHwU9CALE+2eIA+PIaLgnM1 +1oCfUnYBkQurTrihvzz9PryCVkLxiqRmBVvUz+D4N5G/wvvKDS6t6cPCS+hqM482 +cbBsn0R9fFLO4El62S9eH1tqOzO20OAOK65yJIsOpSE= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF +MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL +ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx +MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc +MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+ +AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH +iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj +vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA +0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB +OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/ +BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E +FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01 +GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW +zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4 +1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE 
+f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F +jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN +ZetX2fNXlrtIzYE= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx +EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT +VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE +qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm +4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh +15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES +MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU +V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz +WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO +LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE +AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH +K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX +RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z +rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx +3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq +hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC +MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls +XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D +lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn +aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ +YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRS +MRgwFgYDVQQHDA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJp 
+bGltc2VsIHZlIFRla25vbG9qaWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSw +VEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ryb25payB2ZSBLcmlwdG9sb2ppIEFy +YcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNVBAsMGkthbXUgU2Vy +dGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUgS8O2 +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAe +Fw0wNzA4MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIx +GDAWBgNVBAcMD0dlYnplIC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmls +aW1zZWwgdmUgVGVrbm9sb2ppayBBcmHFn3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBU +QUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZlIEtyaXB0b2xvamkgQXJh +xZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2FtdSBTZXJ0 +aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7Zr +IFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4h +gb46ezzb8R1Sf1n68yJMlaCQvEhOEav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yK +O7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1xnnRFDDtG1hba+818qEhTsXO +fJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR6Oqeyjh1jmKw +lZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQID +AQABo0IwQDAdBgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmP +NOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4N5EY3ATIZJkrGG2AA1nJrvhY0D7t +wyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLTy9LQQfMmNkqblWwM +7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYhLBOh +gLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5n +oN+J1q2MdqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUs +yZyQ2uypQjyttgI= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOc +UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx +c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xS +S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg +SGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4XDTA3MTIyNTE4Mzcx +OVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxla3Ry +b25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMC +VFIxDzANBgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDE +sGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7F +ni4gKGMpIEFyYWzEsWsgMjAwNzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9NYvDdE3ePYakqtdTyuTFY +KTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQvKUmi8wUG ++7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveG +HtyaKhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6P +IzdezKKqdfcYbwnTrqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M +733WB2+Y8a+xwXrXgTW4qhe04MsCAwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHk +Yb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G +CSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/sPx+EnWVUXKgW +AkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I +aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5 +mxRZNTZPz/OOXl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsa +XRik7r4EW5nVcV9VZWRi1aKbBFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZ +qxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAKpoRq0Tl9 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIBCDANBgkqhkiG9w0BAQUFADA6MQswCQYDVQQGEwJDTjER +MA8GA1UEChMIVW5pVHJ1c3QxGDAWBgNVBAMTD1VDQSBHbG9iYWwgUm9vdDAeFw0w +ODAxMDEwMDAwMDBaFw0zNzEyMzEwMDAwMDBaMDoxCzAJBgNVBAYTAkNOMREwDwYD +VQQKEwhVbmlUcnVzdDEYMBYGA1UEAxMPVUNBIEdsb2JhbCBSb290MIICIjANBgkq 
+hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA2rPlBlA/9nP3xDK/RqUlYjOHsGj+p9+I +A2N9Apb964fJ7uIIu527u+RBj8cwiQ9tJMAEbBSUgU2gDXRm8/CFr/hkGd656YGT +0CiFmUdCSiw8OCdKzP/5bBnXtfPvm65bNAbXj6ITBpyKhELVs6OQaG2BkO5NhOxM +cE4t3iQ5zhkAQ5N4+QiGHUPR9HK8BcBn+sBR0smFBySuOR56zUHSNqth6iur8CBV +mTxtLRwuLnWW2HKX4AzKaXPudSsVCeCObbvaE/9GqOgADKwHLx25urnRoPeZnnRc +GQVmMc8+KlL+b5/zub35wYH1N9ouTIElXfbZlJrTNYsgKDdfUet9Ysepk9H50DTL +qScmLCiQkjtVY7cXDlRzq6987DqrcDOsIfsiJrOGrCOp139tywgg8q9A9f9ER3Hd +J90TKKHqdjn5EKCgTUCkJ7JZFStsLSS3JGN490MYeg9NEePorIdCjedYcaSrbqLA +l3y74xNLytu7awj5abQEctXDRrl36v+6++nwOgw19o8PrgaEFt2UVdTvyie3AzzF +HCYq9TyopZWbhvGKiWf4xwxmse1Bv4KmAGg6IjTuHuvlb4l0T2qqaqhXZ1LUIGHB +zlPL/SR/XybfoQhplqCe/klD4tPq2sTxiDEhbhzhzfN1DiBEFsx9c3Q1RSw7gdQg +7LYJjD5IskkCAwEAAaOBojCBnzALBgNVHQ8EBAMCAQYwDAYDVR0TBAUwAwEB/zBj +BgNVHSUEXDBaBggrBgEFBQcDAQYIKwYBBQUHAwIGCCsGAQUFBwMDBggrBgEFBQcD +BAYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEFBQcDBwYIKwYBBQUHAwgGCCsGAQUF +BwMJMB0GA1UdDgQWBBTZw9P4gJJnzF3SOqLXcaK0xDiALTANBgkqhkiG9w0BAQUF +AAOCAgEA0Ih5ygiq9ws0oE4Jwul+NUiJcIQjL1HDKy9e21NrW3UIKlS6Mg7VxnGF +sZdJgPaE0PC6t3GUyHlrpsVE6EKirSUtVy/m1jEp+hmJVCl+t35HNmktbjK81HXa +QnO4TuWDQHOyXd/URHOmYgvbqm4FjMh/Rk85hZCdvBtUKayl1/7lWFZXbSyZoUkh +1WHGjGHhdSTBAd0tGzbDLxLMC9Z4i3WA6UG5iLHKPKkWxk4V43I29tSgQYWvimVw +TbVEEFDs7d9t5tnGwBLxSzovc+k8qe4bqi81pZufTcU0hF8mFGmzI7GJchT46U1R +IgP/SobEHOh7eQrbRyWBfvw0hKxZuFhD5D1DCVR0wtD92e9uWfdyYJl2b/Unp7uD +pEqB7CmB9HdL4UISVdSGKhK28FWbAS7d9qjjGcPORy/AeGEYWsdl/J1GW1fcfA67 +loMQfFUYCQSu0feLKj6g5lDWMDbX54s4U+xJRODPpN/xU3uLWrb2EZBL1nXz/gLz +Ka/wI3J9FO2pXd96gZ6bkiL8HvgBRUGXx2sBYb4zaPKgZYRmvOAqpGjTcezHCN6j +w8k2SjTxF+KAryAhk5Qe5hXTVGLxtTgv48y5ZwSpuuXu+RBuyy5+E6+SFP7zJ3N7 +OPxzbbm5iPZujAv1/P8JDrMtXnt145Ik4ubhWD5LKAN1axibRww= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDhDCCAmygAwIBAgIBCTANBgkqhkiG9w0BAQUFADAzMQswCQYDVQQGEwJDTjER +MA8GA1UEChMIVW5pVHJ1c3QxETAPBgNVBAMTCFVDQSBSb290MB4XDTA0MDEwMTAw +MDAwMFoXDTI5MTIzMTAwMDAwMFowMzELMAkGA1UEBhMCQ04xETAPBgNVBAoTCFVu +aVRydXN0MREwDwYDVQQDEwhVQ0EgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBALNdB8qGJn1r4vs4CQ7MgsJqGgCiFV/W6dQBt1YDAVmP9ThpJHbC +XivF9iu/r/tB/Q9a/KvXg3BNMJjRnrJ2u5LWu+kQKGkoNkTo8SzXWHwk1n8COvCB +a2FgP/Qz3m3l6ihST/ypHWN8C7rqrsRoRuTej8GnsrZYWm0dLNmMOreIy4XU9+gD +Xv2yTVDo1h//rgI/i0+WITyb1yXJHT/7mLFZ5PCpO6+zzYUs4mBGzG+OoOvwNMXx +QhhgrhLtRnUc5dipllq+3lrWeGeWW5N3UPJuG96WUUqm1ktDdSFmjXfsAoR2XEQQ +th1hbOSjIH23jboPkXXHjd+8AmCoKai9PUMCAwEAAaOBojCBnzALBgNVHQ8EBAMC +AQYwDAYDVR0TBAUwAwEB/zBjBgNVHSUEXDBaBggrBgEFBQcDAQYIKwYBBQUHAwIG +CCsGAQUFBwMDBggrBgEFBQcDBAYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEFBQcD +BwYIKwYBBQUHAwgGCCsGAQUFBwMJMB0GA1UdDgQWBBTbHzXza0z/QjFkm827Wh4d +SBC37jANBgkqhkiG9w0BAQUFAAOCAQEAOGy3iPGt+lg3dNHocN6cJ1nL5BXXoMNg +14iABMUwTD3UGusGXllH5rxmy+AI/Og17GJ9ysDawXiv5UZv+4mCI4/211NmVaDe +JRI7cTYWVRJ2+z34VFsxugAG+H1V5ad2g6pcSpemKijfvcZsCyOVjjN/Hl5AHxNU +LJzltQ7dFyiuawHTUin1Ih+QOfTcYmjwPIZH7LgFRbu3DJaUxmfLI3HQjnQi1kHr +A6i26r7EARK1s11AdgYg1GS4KUYGis4fk5oQ7vuqWrTcL9Ury/bXBYSYBZELhPc9 ++tb5evosFeo2gkO3t7jj83EB7UNDogVFwygFBzXjAaU4HoDU18PZ3g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm 
+aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB +kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw +IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD +VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu +dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 +E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ +D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK +4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq +lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW +bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB 
+o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT +MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js +LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr +BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB +AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj +j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH +KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv +2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 +mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEojCCA4qgAwIBAgIQRL4Mi1AAJLQR0zYlJWfJiTANBgkqhkiG9w0BAQUFADCB +rjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xNjA0BgNVBAMTLVVUTi1VU0VSRmlyc3Qt +Q2xpZW50IEF1dGhlbnRpY2F0aW9uIGFuZCBFbWFpbDAeFw05OTA3MDkxNzI4NTBa +Fw0xOTA3MDkxNzM2NThaMIGuMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAV +BgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5l +dHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRydXN0LmNvbTE2MDQGA1UE +AxMtVVROLVVTRVJGaXJzdC1DbGllbnQgQXV0aGVudGljYXRpb24gYW5kIEVtYWls +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsjmFpPJ9q0E7YkY3rs3B +YHW8OWX5ShpHornMSMxqmNVNNRm5pELlzkniii8efNIxB8dOtINknS4p1aJkxIW9 +hVE1eaROaJB7HHqkkqgX8pgV8pPMyaQylbsMTzC9mKALi+VuG6JG+ni8om+rWV6l +L8/K2m2qL+usobNqqrcuZzWLeeEeaYji5kbNoKXqvgvOdjp6Dpvq/NonWz1zHyLm +SGHGTPNpsaguG7bUMSAsvIKKjqQOpdeJQ/wWWq8dcdcRWdq6hw2v+vPhwvCkxWeM +1tZUOt4KpLoDd7NlyP0e03RiqhjKaJMeoYV+9Udly/hNVyh00jT/MLbu9mIwFIws +6wIDAQABo4G5MIG2MAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBSJgmd9xJ0mcABLtFBIfN49rgRufTBYBgNVHR8EUTBPME2gS6BJhkdodHRw +Oi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLVVTRVJGaXJzdC1DbGllbnRBdXRoZW50 +aWNhdGlvbmFuZEVtYWlsLmNybDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwQwDQYJKoZIhvcNAQEFBQADggEBALFtYV2mGn98q0rkMPxTbyUkxsrt4jFcKw7u +7mFVbwQ+zznexRtJlOTrIEy05p5QLnLZjfWqo7NK2lYcYJeA3IKirUq9iiv/Cwm0 +xtcgBEXkzYABurorbs6q15L+5K/r9CYdFip/bDCVNy8zEqx/3cfREYxRmLLQo5HQ +rfafnoOTHh1CuEava2bwm3/q4wMC5QJRwarVNZ1yQAOJujEdxRBoUp7fooXFXAim +eOZTT7Hot9MUnpOmw2TjrH5xzbyf6QMbzPvprDHBr3wVdAKZw7JHpsIyYdfHb0gk +USeh1YdV8nuPmD0Wnu51tvjQjvLzxq4oW6fw8zYX/MMF08oDSlQ= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG +A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe +MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v +d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh +cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn +0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ +M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a +MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd +oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI +DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy +oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy 
+bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF +BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli +CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE +CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t +3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS +KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEZjCCA06gAwIBAgIQRL4Mi1AAJLQR0zYt4LNfGzANBgkqhkiG9w0BAQUFADCB +lTELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHTAbBgNVBAMTFFVUTi1VU0VSRmlyc3Qt +T2JqZWN0MB4XDTk5MDcwOTE4MzEyMFoXDTE5MDcwOTE4NDAzNlowgZUxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJVVDEXMBUGA1UEBxMOU2FsdCBMYWtlIENpdHkxHjAc +BgNVBAoTFVRoZSBVU0VSVFJVU1QgTmV0d29yazEhMB8GA1UECxMYaHR0cDovL3d3 +dy51c2VydHJ1c3QuY29tMR0wGwYDVQQDExRVVE4tVVNFUkZpcnN0LU9iamVjdDCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6qgT+jo2F4qjEAVZURnicP +HxzfOpuCaDDASmEd8S8O+r5596Uj71VRloTN2+O5bj4x2AogZ8f02b+U60cEPgLO +KqJdhwQJ9jCdGIqXsqoc/EHSoTbL+z2RuufZcDX65OeQw5ujm9M89RKZd7G3CeBo +5hy485RjiGpq/gt2yb70IuRnuasaXnfBhQfdDWy/7gbHd2pBnqcP1/vulBe3/IW+ +pKvEHDHd17bR5PDv3xaPslKT16HUiaEHLr/hARJCHhrh2JU022R5KP+6LhHC5ehb +kkj7RwvCbNqtMoNB86XlQXD9ZZBt+vpRxPm9lisZBCzTbafc8H9vg2XiaquHhnUC +AwEAAaOBrzCBrDALBgNVHQ8EBAMCAcYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQU2u1kdBScFDyr3ZmpvVsoTYs8ydgwQgYDVR0fBDswOTA3oDWgM4YxaHR0cDov +L2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmlyc3QtT2JqZWN0LmNybDApBgNV +HSUEIjAgBggrBgEFBQcDAwYIKwYBBQUHAwgGCisGAQQBgjcKAwQwDQYJKoZIhvcN +AQEFBQADggEBAAgfUrE3RHjb/c652pWWmKpVZIC1WkDdIaXFwfNfLEzIR1pp6ujw +NTX00CXzyKakh0q9G7FzCL3Uw8q2NbtZhncxzaeAFK4T7/yxSPlrJSUtUbYsbUXB +mMiKVl0+7kNOPmsnjtA6S4ULX9Ptaqd1y9Fahy85dRNacrACgZ++8A+EVCBibGnU +4U3GDZlDAQ0Slox4nb9QorFEqmrPF3rPbw/U+CRVX/A0FklmPlBGyWNxODFiuGK5 +81OtbLUrohKqGU8J2l7nk8aOFAj+8DCAGKCGhU3IfdeLA/5u1fedFqySLKAj5ZyR +Uh+U3xeUc8OzwcFxBSAAeL0TUh2oPs0AH8g= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCLW3VWhFSFCwDPrzhIzrGkMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN2E1Lm0+afY8wR4 +nN493GwTFtl63SRRZsDHJlkNrAYIwpTRMx/wgzUfbhvI3qpuFU5UJ+/EbRrsC+MO +8ESlV8dAWB6jRx9x7GD2bZTIGDnt/kIYVt/kTEkQeE4BdjVjEjbdZrwBBDajVWjV +ojYJrKshJlQGrT/KFOCsyq0GHZXi+J3x4GD/wn91K0zM2v6HmSHquv4+VNfSWXjb +PG7PoBMAGrgnoeS+Z5bKoMWznN3JdZ7rMJpfo83ZrngZPyPpXNspva1VyBtUjGP2 +6KbqxzcSXKMpHgLZ2x87tNcPVkeBFQRKr4Mn0cVYiMHd9qqnoxjaaKptEVHhv2Vr +n5Z20T0CAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAq2aN17O6x5q25lXQBfGfMY1a +qtmqRiYPce2lrVNWYgFHKkTp/j90CxObufRNG7LRX7K20ohcs5/Ny9Sn2WCVhDr4 +wTcdYcrnsMXlkdpUpqwxga6X3s0IrLjAl4B/bnKk52kTlWUfxJM8/XmPBNQ+T+r3 +ns7NZ3xPZQL/kYVUc8f/NveGLezQXk//EZ9yBta4GvFMDSZl4kSAHsef493oCtrs +pSCAaWihT37ha88HQfqDjrw43bAuEbFrskLMmrz5SCJ5ShkPshw+IHTZasO+8ih4 
+E1Z5T21Q6huwtVexN2ZYI/PcD98Kh8TvhgXVOBRgmaNL3gaWcSzy27YfpO8/7g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGTCCAwECEGFwy0mMX5hFKeewptlQW3owDQYJKoZIhvcNAQEFBQAwgcoxCzAJ +BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVy +aVNpZ24gVHJ1c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24s +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNp +Z24gQ2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eSAtIEczMB4XDTk5MTAwMTAwMDAwMFoXDTM2MDcxNjIzNTk1OVowgcoxCzAJBgNV +BAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVyaVNp +Z24gVHJ1c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24sIElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNpZ24g +Q2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAt +IEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArwoNwtUs22e5LeWU +J92lvuCwTY+zYVY81nzD9M0+hsuiiOLh2KRpxbXiv8GmR1BeRjmL1Za6tW8UvxDO +JxOeBUebMXoT2B/Z0wI3i60sR/COgQanDTAM6/c8DyAd3HJG7qUCyFvDyVZpTMUY +wZF7C9UTAJu878NIPkZgIIUq1ZC2zYugzDLdt/1AVbJQHFauzI13TccgTacxdu9o +koqQHgiBVrKtaaNS0MscxCM9H5n+TOgWY47GCI72MfbS+uV23bUckqNJzc0BzWjN +qWm6o+sdDZykIKbBoMXRRkwXbdKsZj+WjOCE1Db/IlnF+RFgqF8EffIa9iVCYQ/E +Srg+iQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA0JhU8wI1NQ0kdvekhktdmnLfe +xbjQ5F1fdiLAJvmEOjr5jLX77GDx6M4EsMjdpwOPMPOY36TmpDHf0xwLRtxyID+u +7gU8pDM/CzmscHhzS5kr3zDCVLCoO1Wh/hYozUK9dG6A2ydEp85EXdQbkJgNHkKU +sQAsBNB0owIFImNjzYO1+8FtYmtpdf1dcEG59b98377BMnMiIYtYgXsVkXq642RI +sH/7NiXaldDxJBQX3RiAa0YjOVT1jmIJBB2UkKab5iXiQkWquJCtvgiPqQtCGJTP +cjnhsUPgKM+351psE2tJs//jGHyJizNdrDPXp/naOlXJWBD5qu9ats9LS98q +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b +N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t +KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu +kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm +CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ +Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu +imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te +2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe +DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p +F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt +TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG 
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp +U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg +SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln +biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm +GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve +fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ +aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj +aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW +kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC +4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga +FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW +ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1 +nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex +t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz +SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG +BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+ +rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/ +NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E +BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH +BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv +MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE +p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y +5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK +WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ +4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N +hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB +vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W +ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX +MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0 +IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y +IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh +bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF +9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH +H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H 
+LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN +/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT +rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw +WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs +exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4 +sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+ +seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz +4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+ +BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR +lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3 +7M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBr +MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl +cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2WhcNMjIwNjI0MDAxNjEyWjBrMQsw +CQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5h +dGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1l +cmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h +2mCxlCfLF9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4E +lpF7sDPwsRROEW+1QK8bRaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdV +ZqW1LS7YgFmypw23RuwhY/81q6UCzyr0TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq +299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI/k4+oKsGGelT84ATB+0t +vz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzsGHxBvfaL +dXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUF +AAOCAQEAX/FBfXxcCLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcR +zCSs00Rsca4BIGsDoo8Ytyk6feUWYFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3 +LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd +7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBuYQa7FkKMcPcw +++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID+TCCAuGgAwIBAgIQW1fXqEywr9nTb0ugMbTW4jANBgkqhkiG9w0BAQUFADB5 +MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl +cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xKjAoBgNVBAMTIVZpc2EgSW5m +b3JtYXRpb24gRGVsaXZlcnkgUm9vdCBDQTAeFw0wNTA2MjcxNzQyNDJaFw0yNTA2 +MjkxNzQyNDJaMHkxCzAJBgNVBAYTAlVTMQ0wCwYDVQQKEwRWSVNBMS8wLQYDVQQL +EyZWaXNhIEludGVybmF0aW9uYWwgU2VydmljZSBBc3NvY2lhdGlvbjEqMCgGA1UE +AxMhVmlzYSBJbmZvcm1hdGlvbiBEZWxpdmVyeSBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyREA4R/QkkfpLx0cYjga/EhIPZpchH0MZsRZ +FfP6C2ITtf/Wc+MtgD4yTK0yoiXvni3d+aCtEgK3GDvkdgYrgF76ROJFZwUQjQ9l +x42gRT05DbXvWFoy7dTglCZ9z/Tt2Cnktv9oxKgmkeHY/CyfpCBg1S8xth2JlGMR +0ug/GMO5zANuegZOv438p5Lt5So+du2Gl+RMFQqEPwqN5uJSqAe0VtmB4gWdQ8on +Bj2ZAM2R73QW7UW0Igt2vA4JaSiNtaAG/Y/58VXWHGgbq7rDtNK1R30X0kJV0rGA +ib3RSwB3LpG7bOjbIucV5mQgJoVjoA1e05w6g1x/KmNTmOGRVwIDAQABo30wezAP +BgNVHRMBAf8EBTADAQH/MDkGA1UdIAQyMDAwLgYFZ4EDAgEwJTAVBggrBgEFBQcC +ARYJMS4yLjMuNC41MAwGCCsGAQUFBwICMAAwDgYDVR0PAQH/BAQDAgEGMB0GA1Ud +DgQWBBRPitp2/2d3I5qmgH1924h1hfeBejANBgkqhkiG9w0BAQUFAAOCAQEACUW1 +QdUHdDJydgDPmYt+telnG/Su+DPaf1cregzlN43bJaJosMP7NwjoJY/H2He4XLWb +5rXEkl+xH1UyUwF7mtaUoxbGxEvt8hPZSTB4da2mzXgwKvXuHyzF5Qjy1hOB0/pS +WaF9ARpVKJJ7TOJQdGKBsF2Ty4fSCLqZLgfxbqwMsd9sysXI3rDXjIhekqvbgeLz +PqZr+pfgFhwCCLSMQWl5Ll3u7Qk9wR094DZ6jj6+JCVCRUS3HyabH4OlM0Vc2K+j 
+INsF/64Or7GNtRf9HYEJvrPxHINxl3JVwhYj4ASeaO4KwhVbwtw94Tc/XrGcexDo +c5lC3rAi4/UZqweYCw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGjCCAwKgAwIBAgIDAYagMA0GCSqGSIb3DQEBBQUAMIGjMQswCQYDVQQGEwJG +STEQMA4GA1UECBMHRmlubGFuZDEhMB8GA1UEChMYVmFlc3RvcmVraXN0ZXJpa2Vz +a3VzIENBMSkwJwYDVQQLEyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBTZXJ2aWNl +czEZMBcGA1UECxMQVmFybWVubmVwYWx2ZWx1dDEZMBcGA1UEAxMQVlJLIEdvdi4g +Um9vdCBDQTAeFw0wMjEyMTgxMzUzMDBaFw0yMzEyMTgxMzUxMDhaMIGjMQswCQYD +VQQGEwJGSTEQMA4GA1UECBMHRmlubGFuZDEhMB8GA1UEChMYVmFlc3RvcmVraXN0 +ZXJpa2Vza3VzIENBMSkwJwYDVQQLEyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBT +ZXJ2aWNlczEZMBcGA1UECxMQVmFybWVubmVwYWx2ZWx1dDEZMBcGA1UEAxMQVlJL +IEdvdi4gUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALCF +FdrIAzfQo0Y3bBseljDCWoUSZyPyu5/nioFgJ/gTqTy894aqqvTzJSm0/nWuHoGG +igWyHWWyOOi0zCia+xc28ZPVec7Bg4shT8MNrUHfeJ1I4x9CRPw8bSEga60ihCRC +jxdNwlAfZM0tOSJWiP2yY51U2kJpwMhP1xjiPshphJQ9LIDGfM6911Mf64i5psu7 +hVfvV3ZdDIvTXhJBnyHAOfQmbQj6OLOhd7HuFtjQaNq0mKWgZUZKa41+qk1guPjI +DfxxPu45h4G02fhukO4/DmHXHSto5i7hQkQmeCxY8n0Wf2HASSQqiYe2XS8pGfim +545SnkFLWg6quMJmQlMCAwEAAaNVMFMwDwYDVR0TAQH/BAUwAwEB/zARBglghkgB +hvhCAQEEBAMCAAcwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBTb6eGb0tEkC/yr +46Bn6q6cS3f0sDANBgkqhkiG9w0BAQUFAAOCAQEArX1ID1QRnljurw2bEi8hpM2b +uoRH5sklVSPj3xhYKizbXvfNVPVRJHtiZ+GxH0mvNNDrsczZog1Sf0JLiGCXzyVy +t08pLWKfT6HAVVdWDsRol5EfnGTCKTIB6dTI2riBmCguGMcs/OubUpbf9MiQGS0j +8/G7cdqehSO9Gu8u5Hp5t8OdhkktY7ktdM9lDzJmid87Ie4pbzlj2RXBbvbfgD5Q +eBmK3QOjFKU3p7UsfLYRh+cF8ry23tT/l4EohP7+bEaFEEGfTXWMB9SZZ291im/k +UJL2mdUQuMSpe/cXjUu/15WfCdxEDx4yw8DP03kN5Mc7h/CQNIghYkmSBAQfvA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB +gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk +MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY +UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx +NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3 +dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy +dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6 +38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP +KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q +DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4 +qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa +JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi +PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P +BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs +jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0 +eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR +vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa +IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy +i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ +O+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- +` diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_js.go b/vendor/github.com/google/certificate-transparency-go/x509/root_js.go new file mode 100644 index 00000000000..4240207a0a0 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_js.go @@ -0,0 +1,19 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build js && wasm +// +build js,wasm + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{} + +func loadSystemRoots() (*CertPool, error) { + return NewCertPool(), nil +} + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go b/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go new file mode 100644 index 00000000000..267775dc5f0 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go @@ -0,0 +1,15 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc. + "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL 6 + "/etc/ssl/ca-bundle.pem", // OpenSUSE + "/etc/pki/tls/cacert.pem", // OpenELEC + "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7 + "/etc/ssl/cert.pem", // Alpine Linux +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go b/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go new file mode 100644 index 00000000000..2ee1d5ce800 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo +// +build !cgo + +package x509 + +func loadSystemRoots() (*CertPool, error) { + return execSecurityRoots() +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go b/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go new file mode 100644 index 00000000000..2bdb2fe7136 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go @@ -0,0 +1,40 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build plan9 +// +build plan9 + +package x509 + +import ( + "os" +) + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/sys/lib/tls/ca.pem", +} + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} + +func loadSystemRoots() (*CertPool, error) { + roots := NewCertPool() + var bestErr error + for _, file := range certFiles { + data, err := os.ReadFile(file) + if err == nil { + roots.AppendCertsFromPEM(data) + return roots, nil + } + if bestErr == nil || (os.IsNotExist(bestErr) && !os.IsNotExist(err)) { + bestErr = err + } + } + if bestErr == nil { + return roots, nil + } + return nil, bestErr +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_solaris.go b/vendor/github.com/google/certificate-transparency-go/x509/root_solaris.go new file mode 100644 index 00000000000..e6d4e613994 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_solaris.go @@ -0,0 +1,12 @@ +// Copyright 2015 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/etc/certs/ca-certificates.crt", // Solaris 11.2+ + "/etc/ssl/certs/ca-certificates.crt", // Joyent SmartOS + "/etc/ssl/cacert.pem", // OmniOS +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go b/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go new file mode 100644 index 00000000000..ac2d01d9798 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go @@ -0,0 +1,89 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix dragonfly freebsd linux netbsd openbsd solaris zos + +package x509 + +import ( + "os" +) + +// Possible directories with certificate files; stop after successfully +// reading at least one file from a directory. +var certDirectories = []string{ + "/etc/ssl/certs", // SLES10/SLES11, https://golang.org/issue/12139 + "/system/etc/security/cacerts", // Android + "/usr/local/share/certs", // FreeBSD + "/etc/pki/tls/certs", // Fedora/RHEL + "/etc/openssl/certs", // NetBSD + "/var/ssl/certs", // AIX +} + +const ( + // certFileEnv is the environment variable which identifies where to locate + // the SSL certificate file. If set this overrides the system default. + certFileEnv = "SSL_CERT_FILE" + + // certDirEnv is the environment variable which identifies which directory + // to check for SSL certificate files. If set this overrides the system default. + certDirEnv = "SSL_CERT_DIR" +) + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} + +func loadSystemRoots() (*CertPool, error) { + roots := NewCertPool() + + files := certFiles + if f := os.Getenv(certFileEnv); f != "" { + files = []string{f} + } + + var firstErr error + for _, file := range files { + data, err := os.ReadFile(file) + if err == nil { + roots.AppendCertsFromPEM(data) + break + } + if firstErr == nil && !os.IsNotExist(err) { + firstErr = err + } + } + + dirs := certDirectories + if d := os.Getenv(certDirEnv); d != "" { + dirs = []string{d} + } + + for _, directory := range dirs { + fis, err := os.ReadDir(directory) + if err != nil { + if firstErr == nil && !os.IsNotExist(err) { + firstErr = err + } + continue + } + rootsAdded := false + for _, fi := range fis { + data, err := os.ReadFile(directory + "/" + fi.Name()) + if err == nil && roots.AppendCertsFromPEM(data) { + rootsAdded = true + } + } + if rootsAdded { + return roots, nil + } + } + + if len(roots.certs) > 0 || firstErr == nil { + return roots, nil + } + + return nil, firstErr +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_wasip1.go b/vendor/github.com/google/certificate-transparency-go/x509/root_wasip1.go new file mode 100644 index 00000000000..e5cf98e0af0 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_wasip1.go @@ -0,0 +1,19 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasip1 +// +build wasip1 + +package x509 + +// Possible certificate files; stop after finding one. 
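+// (wasip1 provides no system certificate store, so this list is
+// intentionally left empty and loadSystemRoots below returns an empty
+// pool rather than an error.)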
+var certFiles = []string{} + +func loadSystemRoots() (*CertPool, error) { + return NewCertPool(), nil +} + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go b/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go new file mode 100644 index 00000000000..39ec95ef3aa --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go @@ -0,0 +1,286 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "errors" + "syscall" + "unsafe" +) + +// Creates a new *syscall.CertContext representing the leaf certificate in an in-memory +// certificate store containing itself and all of the intermediate certificates specified +// in the opts.Intermediates CertPool. +// +// A pointer to the in-memory store is available in the returned CertContext's Store field. +// The store is automatically freed when the CertContext is freed using +// syscall.CertFreeCertificateContext. +func createStoreContext(leaf *Certificate, opts *VerifyOptions) (*syscall.CertContext, error) { + var storeCtx *syscall.CertContext + + leafCtx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &leaf.Raw[0], uint32(len(leaf.Raw))) + if err != nil { + return nil, err + } + defer syscall.CertFreeCertificateContext(leafCtx) + + handle, err := syscall.CertOpenStore(syscall.CERT_STORE_PROV_MEMORY, 0, 0, syscall.CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG, 0) + if err != nil { + return nil, err + } + defer syscall.CertCloseStore(handle, 0) + + err = syscall.CertAddCertificateContextToStore(handle, leafCtx, syscall.CERT_STORE_ADD_ALWAYS, &storeCtx) + if err != nil { + return nil, err + } + + if opts.Intermediates != nil { + for _, intermediate := range opts.Intermediates.certs { + ctx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &intermediate.Raw[0], uint32(len(intermediate.Raw))) + if err != nil { + return nil, err + } + + err = syscall.CertAddCertificateContextToStore(handle, ctx, syscall.CERT_STORE_ADD_ALWAYS, nil) + syscall.CertFreeCertificateContext(ctx) + if err != nil { + return nil, err + } + } + } + + return storeCtx, nil +} + +// extractSimpleChain extracts the final certificate chain from a CertSimpleChain. +func extractSimpleChain(simpleChain **syscall.CertSimpleChain, count int) (chain []*Certificate, err error) { + if simpleChain == nil || count == 0 { + return nil, errors.New("x509: invalid simple chain") + } + + simpleChains := (*[1 << 20]*syscall.CertSimpleChain)(unsafe.Pointer(simpleChain))[:count:count] + lastChain := simpleChains[count-1] + elements := (*[1 << 20]*syscall.CertChainElement)(unsafe.Pointer(lastChain.Elements))[:lastChain.NumElements:lastChain.NumElements] + for i := 0; i < int(lastChain.NumElements); i++ { + // Copy the buf, since ParseCertificate does not create its own copy. 
+ cert := elements[i].CertContext + encodedCert := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:cert.Length:cert.Length] + buf := make([]byte, cert.Length) + copy(buf, encodedCert) + parsedCert, err := ParseCertificate(buf) + if err != nil { + return nil, err + } + chain = append(chain, parsedCert) + } + + return chain, nil +} + +// checkChainTrustStatus checks the trust status of the certificate chain, translating +// any errors it finds into Go errors in the process. +func checkChainTrustStatus(c *Certificate, chainCtx *syscall.CertChainContext) error { + if chainCtx.TrustStatus.ErrorStatus != syscall.CERT_TRUST_NO_ERROR { + status := chainCtx.TrustStatus.ErrorStatus + switch status { + case syscall.CERT_TRUST_IS_NOT_TIME_VALID: + return CertificateInvalidError{c, Expired, ""} + default: + return UnknownAuthorityError{c, nil, nil} + } + } + return nil +} + +// checkChainSSLServerPolicy checks that the certificate chain in chainCtx is valid for +// use as a certificate chain for a SSL/TLS server. +func checkChainSSLServerPolicy(c *Certificate, chainCtx *syscall.CertChainContext, opts *VerifyOptions) error { + servernamep, err := syscall.UTF16PtrFromString(opts.DNSName) + if err != nil { + return err + } + sslPara := &syscall.SSLExtraCertChainPolicyPara{ + AuthType: syscall.AUTHTYPE_SERVER, + ServerName: servernamep, + } + sslPara.Size = uint32(unsafe.Sizeof(*sslPara)) + + para := &syscall.CertChainPolicyPara{ + ExtraPolicyPara: convertToPolicyParaType(unsafe.Pointer(sslPara)), + } + para.Size = uint32(unsafe.Sizeof(*para)) + + status := syscall.CertChainPolicyStatus{} + err = syscall.CertVerifyCertificateChainPolicy(syscall.CERT_CHAIN_POLICY_SSL, chainCtx, para, &status) + if err != nil { + return err + } + + // TODO(mkrautz): use the lChainIndex and lElementIndex fields + // of the CertChainPolicyStatus to provide proper context, instead + // using c. + if status.Error != 0 { + switch status.Error { + case syscall.CERT_E_EXPIRED: + return CertificateInvalidError{c, Expired, ""} + case syscall.CERT_E_CN_NO_MATCH: + return HostnameError{c, opts.DNSName} + case syscall.CERT_E_UNTRUSTEDROOT: + return UnknownAuthorityError{c, nil, nil} + default: + return UnknownAuthorityError{c, nil, nil} + } + } + + return nil +} + +// systemVerify is like Verify, except that it uses CryptoAPI calls +// to build certificate chains and verify them. +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + hasDNSName := opts != nil && len(opts.DNSName) > 0 + + storeCtx, err := createStoreContext(c, opts) + if err != nil { + return nil, err + } + defer syscall.CertFreeCertificateContext(storeCtx) + + para := new(syscall.CertChainPara) + para.Size = uint32(unsafe.Sizeof(*para)) + + // If there's a DNSName set in opts, assume we're verifying + // a certificate from a TLS server. + if hasDNSName { + oids := []*byte{ + &syscall.OID_PKIX_KP_SERVER_AUTH[0], + // Both IE and Chrome allow certificates with + // Server Gated Crypto as well. Some certificates + // in the wild require them. 
+ &syscall.OID_SERVER_GATED_CRYPTO[0], + &syscall.OID_SGC_NETSCAPE[0], + } + para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_OR + para.RequestedUsage.Usage.Length = uint32(len(oids)) + para.RequestedUsage.Usage.UsageIdentifiers = &oids[0] + } else { + para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_AND + para.RequestedUsage.Usage.Length = 0 + para.RequestedUsage.Usage.UsageIdentifiers = nil + } + + var verifyTime *syscall.Filetime + if opts != nil && !opts.CurrentTime.IsZero() { + ft := syscall.NsecToFiletime(opts.CurrentTime.UnixNano()) + verifyTime = &ft + } + + // CertGetCertificateChain will traverse Windows's root stores + // in an attempt to build a verified certificate chain. Once + // it has found a verified chain, it stops. MSDN docs on + // CERT_CHAIN_CONTEXT: + // + // When a CERT_CHAIN_CONTEXT is built, the first simple chain + // begins with an end certificate and ends with a self-signed + // certificate. If that self-signed certificate is not a root + // or otherwise trusted certificate, an attempt is made to + // build a new chain. CTLs are used to create the new chain + // beginning with the self-signed certificate from the original + // chain as the end certificate of the new chain. This process + // continues building additional simple chains until the first + // self-signed certificate is a trusted certificate or until + // an additional simple chain cannot be built. + // + // The result is that we'll only get a single trusted chain to + // return to our caller. + var chainCtx *syscall.CertChainContext + err = syscall.CertGetCertificateChain(syscall.Handle(0), storeCtx, verifyTime, storeCtx.Store, para, 0, 0, &chainCtx) + if err != nil { + return nil, err + } + defer syscall.CertFreeCertificateChain(chainCtx) + + err = checkChainTrustStatus(c, chainCtx) + if err != nil { + return nil, err + } + + if hasDNSName { + err = checkChainSSLServerPolicy(c, chainCtx, opts) + if err != nil { + return nil, err + } + } + + chain, err := extractSimpleChain(chainCtx.Chains, int(chainCtx.ChainCount)) + if err != nil { + return nil, err + } + if len(chain) < 1 { + return nil, errors.New("x509: internal error: system verifier returned an empty chain") + } + + // Mitigate CVE-2020-0601, where the Windows system verifier might be + // tricked into using custom curve parameters for a trusted root, by + // double-checking all ECDSA signatures. If the system was tricked into + // using spoofed parameters, the signature will be invalid for the correct + // ones we parsed. (We don't support custom curves ourselves.) + for i, parent := range chain[1:] { + if parent.PublicKeyAlgorithm != ECDSA { + continue + } + if err := parent.CheckSignature(chain[i].SignatureAlgorithm, + chain[i].RawTBSCertificate, chain[i].Signature); err != nil { + return nil, err + } + } + + return [][]*Certificate{chain}, nil +} + +func loadSystemRoots() (*CertPool, error) { + // TODO: restore this functionality on Windows. We tried to do + // it in Go 1.8 but had to revert it. See Issue 18609. + // Returning (nil, nil) was the old behavior, prior to CL 30578. + // The if statement here avoids vet complaining about + // unreachable code below. 
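+	// (A hedged note: while this early return is in place, verification on
+	// Windows is expected to go through systemVerify above, which has
+	// CryptoAPI build and validate the chain directly instead of mirroring
+	// the ROOT store into a CertPool.)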
+ if true { + return nil, nil + } + + const CRYPT_E_NOT_FOUND = 0x80092004 + + store, err := syscall.CertOpenSystemStore(0, syscall.StringToUTF16Ptr("ROOT")) + if err != nil { + return nil, err + } + defer syscall.CertCloseStore(store, 0) + + roots := NewCertPool() + var cert *syscall.CertContext + for { + cert, err = syscall.CertEnumCertificatesInStore(store, cert) + if err != nil { + if errno, ok := err.(syscall.Errno); ok { + if errno == CRYPT_E_NOT_FOUND { + break + } + } + return nil, err + } + if cert == nil { + break + } + // Copy the buf, since ParseCertificate does not create its own copy. + buf := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:cert.Length:cert.Length] + buf2 := make([]byte, cert.Length) + copy(buf2, buf) + if c, err := ParseCertificate(buf2); err == nil { + roots.AddCert(c) + } + } + return roots, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_zos.go b/vendor/github.com/google/certificate-transparency-go/x509/root_zos.go new file mode 100644 index 00000000000..54f240af07d --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/root_zos.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos +// +build zos + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/etc/cacert.pem", // IBM zOS default +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/rpki.go b/vendor/github.com/google/certificate-transparency-go/x509/rpki.go new file mode 100644 index 00000000000..520d6dc3abd --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/rpki.go @@ -0,0 +1,242 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + + "github.com/google/certificate-transparency-go/asn1" +) + +// IPAddressPrefix describes an IP address prefix as an ASN.1 bit string, +// where the BitLength field holds the prefix length. +type IPAddressPrefix asn1.BitString + +// IPAddressRange describes an (inclusive) IP address range. +type IPAddressRange struct { + Min IPAddressPrefix + Max IPAddressPrefix +} + +// Most relevant values for AFI from: +// http://www.iana.org/assignments/address-family-numbers. +const ( + IPv4AddressFamilyIndicator = uint16(1) + IPv6AddressFamilyIndicator = uint16(2) +) + +// IPAddressFamilyBlocks describes a set of ranges of IP addresses. +type IPAddressFamilyBlocks struct { + // AFI holds an address family indicator from + // http://www.iana.org/assignments/address-family-numbers. + AFI uint16 + // SAFI holds a subsequent address family indicator from + // http://www.iana.org/assignments/safi-namespace. + SAFI byte + // InheritFromIssuer indicates that the set of addresses should + // be taken from the issuer's certificate. + InheritFromIssuer bool + // AddressPrefixes holds prefixes if InheritFromIssuer is false. + AddressPrefixes []IPAddressPrefix + // AddressRanges holds ranges if InheritFromIssuer is false. + AddressRanges []IPAddressRange +} + +// Internal types for asn1 unmarshalling. 
+type ipAddressFamily struct { + AddressFamily []byte // 2-byte AFI plus optional 1 byte SAFI + Choice asn1.RawValue +} + +// Internally, use raw asn1.BitString rather than the IPAddressPrefix +// type alias (so that asn1.Unmarshal() decodes properly). +type ipAddressRange struct { + Min asn1.BitString + Max asn1.BitString +} + +func parseRPKIAddrBlocks(data []byte, nfe *NonFatalErrors) []*IPAddressFamilyBlocks { + // RFC 3779 2.2.3 + // IPAddrBlocks ::= SEQUENCE OF IPAddressFamily + // + // IPAddressFamily ::= SEQUENCE { -- AFI & optional SAFI -- + // addressFamily OCTET STRING (SIZE (2..3)), + // ipAddressChoice IPAddressChoice } + // + // IPAddressChoice ::= CHOICE { + // inherit NULL, -- inherit from issuer -- + // addressesOrRanges SEQUENCE OF IPAddressOrRange } + // + // IPAddressOrRange ::= CHOICE { + // addressPrefix IPAddress, + // addressRange IPAddressRange } + // + // IPAddressRange ::= SEQUENCE { + // min IPAddress, + // max IPAddress } + // + // IPAddress ::= BIT STRING + + var addrBlocks []ipAddressFamily + if rest, err := asn1.Unmarshal(data, &addrBlocks); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks extension: %v", err)) + return nil + } else if len(rest) != 0 { + nfe.AddError(errors.New("trailing data after ipAddrBlocks extension")) + return nil + } + + var results []*IPAddressFamilyBlocks + for i, block := range addrBlocks { + var fam IPAddressFamilyBlocks + if l := len(block.AddressFamily); l < 2 || l > 3 { + nfe.AddError(fmt.Errorf("invalid address family length (%d) for ipAddrBlock.addressFamily", l)) + continue + } + fam.AFI = binary.BigEndian.Uint16(block.AddressFamily[0:2]) + if len(block.AddressFamily) > 2 { + fam.SAFI = block.AddressFamily[2] + } + // IPAddressChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit) + // tagging of the alternatives -- here, either NULL or SEQUENCE OF. + if bytes.Equal(block.Choice.FullBytes, asn1.NullBytes) { + fam.InheritFromIssuer = true + results = append(results, &fam) + continue + } + + var addrRanges []asn1.RawValue + if _, err := asn1.Unmarshal(block.Choice.FullBytes, &addrRanges); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges: %v", i, err)) + continue + } + for j, ar := range addrRanges { + // Each IPAddressOrRange is a CHOICE where the alternatives have distinct (implicit) + // tags -- here, either BIT STRING or SEQUENCE. 
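+			// For illustration (values assumed): the prefix 10.0.0.0/8 arrives as
+			// a BIT STRING of eight bits (0x0A), while a min/max range arrives as
+			// a SEQUENCE of two such BIT STRINGs, per RFC 3779.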
+ switch ar.Tag { + case asn1.TagBitString: + // BIT STRING for single prefix IPAddress + var val asn1.BitString + if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressPrefix: %v", i, j, err)) + continue + } + fam.AddressPrefixes = append(fam.AddressPrefixes, IPAddressPrefix(val)) + + case asn1.TagSequence: + var val ipAddressRange + if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressRange: %v", i, j, err)) + continue + } + fam.AddressRanges = append(fam.AddressRanges, IPAddressRange{Min: IPAddressPrefix(val.Min), Max: IPAddressPrefix(val.Max)}) + + default: + nfe.AddError(fmt.Errorf("unexpected ASN.1 type in ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d]: %+v", i, j, ar)) + } + } + results = append(results, &fam) + } + return results +} + +// ASIDRange describes an inclusive range of AS Identifiers (AS numbers or routing +// domain identifiers). +type ASIDRange struct { + Min int + Max int +} + +// ASIdentifiers describes a collection of AS Identifiers (AS numbers or routing +// domain identifiers). +type ASIdentifiers struct { + // InheritFromIssuer indicates that the set of AS identifiers should + // be taken from the issuer's certificate. + InheritFromIssuer bool + // ASIDs holds AS identifiers if InheritFromIssuer is false. + ASIDs []int + // ASIDs holds AS identifier ranges (inclusive) if InheritFromIssuer is false. + ASIDRanges []ASIDRange +} + +type asIdentifiers struct { + ASNum asn1.RawValue `asn1:"optional,tag:0"` + RDI asn1.RawValue `asn1:"optional,tag:1"` +} + +func parseASIDChoice(val asn1.RawValue, nfe *NonFatalErrors) *ASIdentifiers { + // RFC 3779 2.3.2 + // ASIdentifierChoice ::= CHOICE { + // inherit NULL, -- inherit from issuer -- + // asIdsOrRanges SEQUENCE OF ASIdOrRange } + // ASIdOrRange ::= CHOICE { + // id ASId, + // range ASRange } + // ASRange ::= SEQUENCE { + // min ASId, + // max ASId } + // ASId ::= INTEGER + if len(val.FullBytes) == 0 { // OPTIONAL + return nil + } + // ASIdentifierChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit) + // tagging of the alternatives -- here, either NULL or SEQUENCE OF. + if bytes.Equal(val.Bytes, asn1.NullBytes) { + return &ASIdentifiers{InheritFromIssuer: true} + } + var ids []asn1.RawValue + if rest, err := asn1.Unmarshal(val.Bytes, &ids); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges: %v", err)) + return nil + } else if len(rest) != 0 { + nfe.AddError(errors.New("trailing data after ASIdentifiers.asIdsOrRanges")) + return nil + } + var asID ASIdentifiers + for i, id := range ids { + // Each ASIdOrRange is a CHOICE where the alternatives have distinct (implicit) + // tags -- here, either INTEGER or SEQUENCE. 
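+		// For illustration (values assumed): the single AS number 64496 arrives
+		// as INTEGER 64496, while the range 64496..64511 arrives as
+		// SEQUENCE { min INTEGER 64496, max INTEGER 64511 }.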
+ switch id.Tag { + case asn1.TagInteger: + var val int + if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].id: %v", i, err)) + continue + } + asID.ASIDs = append(asID.ASIDs, val) + + case asn1.TagSequence: + var val ASIDRange + if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].range: %v", i, err)) + continue + } + asID.ASIDRanges = append(asID.ASIDRanges, val) + + default: + nfe.AddError(fmt.Errorf("unexpected value in ASIdentifiers.asIdsOrRanges[%d]: %+v", i, id)) + } + } + return &asID +} + +func parseRPKIASIdentifiers(data []byte, nfe *NonFatalErrors) (*ASIdentifiers, *ASIdentifiers) { + // RFC 3779 2.3.2 + // ASIdentifiers ::= SEQUENCE { + // asnum [0] EXPLICIT ASIdentifierChoice OPTIONAL, + // rdi [1] EXPLICIT ASIdentifierChoice OPTIONAL} + var asIDs asIdentifiers + if rest, err := asn1.Unmarshal(data, &asIDs); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers extension: %v", err)) + return nil, nil + } else if len(rest) != 0 { + nfe.AddError(errors.New("trailing data after ASIdentifiers extension")) + return nil, nil + } + return parseASIDChoice(asIDs.ASNum, nfe), parseASIDChoice(asIDs.RDI, nfe) +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/sec1.go b/vendor/github.com/google/certificate-transparency-go/x509/sec1.go new file mode 100644 index 00000000000..d19407079f3 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/sec1.go @@ -0,0 +1,127 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "errors" + "fmt" + "math/big" + + "github.com/google/certificate-transparency-go/asn1" +) + +const ecPrivKeyVersion = 1 + +// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure. +// References: +// +// RFC 5915 +// SEC1 - http://www.secg.org/sec1-v2.pdf +// +// Per RFC 5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, however in +// most cases it is not. +type ecPrivateKey struct { + Version int + PrivateKey []byte + NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"` + PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"` +} + +// ParseECPrivateKey parses an EC private key in SEC 1, ASN.1 DER form. +// +// This kind of key is commonly encoded in PEM blocks of type "EC PRIVATE KEY". +func ParseECPrivateKey(der []byte) (*ecdsa.PrivateKey, error) { + return parseECPrivateKey(nil, der) +} + +// MarshalECPrivateKey converts an EC private key to SEC 1, ASN.1 DER form. +// +// This kind of key is commonly encoded in PEM blocks of type "EC PRIVATE KEY". +// For a more flexible key format which is not EC specific, use +// MarshalPKCS8PrivateKey. +func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) { + oid, ok := OIDFromNamedCurve(key.Curve) + if !ok { + return nil, errors.New("x509: unknown elliptic curve") + } + + return marshalECPrivateKeyWithOID(key, oid) +} + +// marshalECPrivateKey marshals an EC private key into ASN.1, DER format and +// sets the curve ID to the given OID, or omits it if OID is nil. 
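+// A hedged usage sketch for the exported wrapper above: with an assumed key
+// from ecdsa.GenerateKey(elliptic.P256(), rand.Reader), the DER produced by
+// MarshalECPrivateKey(key) can be wrapped in a pem.Block of type
+// "EC PRIVATE KEY" for storage.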
+func marshalECPrivateKeyWithOID(key *ecdsa.PrivateKey, oid asn1.ObjectIdentifier) ([]byte, error) { + privateKeyBytes := key.D.Bytes() + paddedPrivateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8) + copy(paddedPrivateKey[len(paddedPrivateKey)-len(privateKeyBytes):], privateKeyBytes) + + return asn1.Marshal(ecPrivateKey{ + Version: 1, + PrivateKey: paddedPrivateKey, + NamedCurveOID: oid, + PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)}, + }) +} + +// parseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure. +// The OID for the named curve may be provided from another source (such as +// the PKCS8 container) - if it is provided then use this instead of the OID +// that may exist in the EC private key structure. +func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) { + var privKey ecPrivateKey + if _, err := asn1.Unmarshal(der, &privKey); err != nil { + if _, err := asn1.Unmarshal(der, &pkcs8{}); err == nil { + return nil, errors.New("x509: failed to parse private key (use ParsePKCS8PrivateKey instead for this key format)") + } + if _, err := asn1.Unmarshal(der, &pkcs1PrivateKey{}); err == nil { + return nil, errors.New("x509: failed to parse private key (use ParsePKCS1PrivateKey instead for this key format)") + } + return nil, errors.New("x509: failed to parse EC private key: " + err.Error()) + } + if privKey.Version != ecPrivKeyVersion { + return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version) + } + + var nfe NonFatalErrors + var curve elliptic.Curve + if namedCurveOID != nil { + curve = namedCurveFromOID(*namedCurveOID, &nfe) + } else { + curve = namedCurveFromOID(privKey.NamedCurveOID, &nfe) + } + if curve == nil { + return nil, errors.New("x509: unknown elliptic curve") + } + + k := new(big.Int).SetBytes(privKey.PrivateKey) + curveOrder := curve.Params().N + if k.Cmp(curveOrder) >= 0 { + return nil, errors.New("x509: invalid elliptic curve private key value") + } + priv := new(ecdsa.PrivateKey) + priv.Curve = curve + priv.D = k + + privateKey := make([]byte, (curveOrder.BitLen()+7)/8) + + // Some private keys have leading zero padding. This is invalid + // according to [SEC1], but this code will ignore it. + for len(privKey.PrivateKey) > len(privateKey) { + if privKey.PrivateKey[0] != 0 { + return nil, errors.New("x509: invalid private key length") + } + privKey.PrivateKey = privKey.PrivateKey[1:] + } + + // Some private keys remove all leading zeros, this is also invalid + // according to [SEC1] but since OpenSSL used to do this, we ignore + // this too. 
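+	// An assumed P-256 example: a 31-byte scalar is left-padded back to the
+	// curve order's 32-byte length here before ScalarBaseMult derives the
+	// public point.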
+ copy(privateKey[len(privateKey)-len(privKey.PrivateKey):], privKey.PrivateKey) + priv.X, priv.Y = curve.ScalarBaseMult(privateKey) + + return priv, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509/test-dir.crt b/vendor/github.com/google/certificate-transparency-go/x509/test-dir.crt new file mode 100644 index 00000000000..b7fc9c51861 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/test-dir.crt @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIJAL8a/lsnspOqMA0GCSqGSIb3DQEBCwUAMEwxCzAJBgNV +BAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYDVQQKDAxHb2xhbmcgVGVz +dHMxETAPBgNVBAMMCHRlc3QtZGlyMB4XDTE3MDIwMTIzNTAyN1oXDTI3MDEzMDIz +NTAyN1owTDELMAkGA1UEBhMCVUsxEzARBgNVBAgMClRlc3QtU3RhdGUxFTATBgNV +BAoMDEdvbGFuZyBUZXN0czERMA8GA1UEAwwIdGVzdC1kaXIwggIiMA0GCSqGSIb3 +DQEBAQUAA4ICDwAwggIKAoICAQDzBoi43Yn30KN13PKFHu8LA4UmgCRToTukLItM +WK2Je45grs/axg9n3YJOXC6hmsyrkOnyBcx1xVNgSrOAll7fSjtChRIX72Xrloxu +XewtWVIrijqz6oylbvEmbRT3O8uynu5rF82Pmdiy8oiSfdywjKuPnE0hjV1ZSCql +MYcXqA+f0JFD8kMv4pbtxjGH8f2DkYQz+hHXLrJH4/MEYdVMQXoz/GDzLyOkrXBN +hpMaBBqg1p0P+tRdfLXuliNzA9vbZylzpF1YZ0gvsr0S5Y6LVtv7QIRygRuLY4kF +k+UYuFq8NrV8TykS7FVnO3tf4XcYZ7r2KV5FjYSrJtNNo85BV5c3xMD3fJ2XcOWk ++oD1ATdgAM3aKmSOxNtNItKKxBe1mkqDH41NbWx7xMad78gDznyeT0tjEOltN2bM +uXU1R/jgR/vq5Ec0AhXJyL/ziIcmuV2fSl/ZxT4ARD+16tgPiIx+welTf0v27/JY +adlfkkL5XsPRrbSguISrj7JeaO/gjG3KnDVHcZvYBpDfHqRhCgrosfe26TZcTXx2 +cRxOfvBjMz1zJAg+esuUzSkerreyRhzD7RpeZTwi6sxvx82MhYMbA3w1LtgdABio +9JRqZy3xqsIbNv7N46WO/qXL1UMRKb1UyHeW8g8btboz+B4zv1U0Nj+9qxPBbQui +dgL9LQIDAQABo1AwTjAdBgNVHQ4EFgQUy0/0W8nwQfz2tO6AZ2jPkEiTzvUwHwYD +VR0jBBgwFoAUy0/0W8nwQfz2tO6AZ2jPkEiTzvUwDAYDVR0TBAUwAwEB/zANBgkq +hkiG9w0BAQsFAAOCAgEAvEVnUYsIOt87rggmLPqEueynkuQ+562M8EDHSQl82zbe +xDCxeg3DvPgKb+RvaUdt1362z/szK10SoeMgx6+EQLoV9LiVqXwNqeYfixrhrdw3 +ppAhYYhymdkbUQCEMHypmXP1vPhAz4o8Bs+eES1M+zO6ErBiD7SqkmBElT+GixJC +6epC9ZQFs+dw3lPlbiZSsGE85sqc3VAs0/JgpL/pb1/Eg4s0FUhZD2C2uWdSyZGc +g0/v3aXJCp4j/9VoNhI1WXz3M45nysZIL5OQgXymLqJElQa1pZ3Wa4i/nidvT4AT +Xlxc/qijM8set/nOqp7hVd5J0uG6qdwLRILUddZ6OpXd7ZNi1EXg+Bpc7ehzGsDt +3UFGzYXDjxYnK2frQfjLS8stOQIqSrGthW6x0fdkVx0y8BByvd5J6+JmZl4UZfzA +m99VxXSt4B9x6BvnY7ktzcFDOjtuLc4B/7yg9fv1eQuStA4cHGGAttsCg1X/Kx8W +PvkkeH0UWDZ9vhH9K36703z89da6MWF+bz92B0+4HoOmlVaXRkvblsNaynJnL0LC +Ayry7QBxuh5cMnDdRwJB3AVJIiJ1GVpb7aGvBOnx+s2lwRv9HWtghb+cbwwktx1M +JHyBf3GZNSWTpKY7cD8V+NnBv3UuioOVVo+XAU4LF/bYUjdRpxWADJizNtZrtFo= +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/certificate-transparency-go/x509/test-file.crt b/vendor/github.com/google/certificate-transparency-go/x509/test-file.crt new file mode 100644 index 00000000000..caa83b9f824 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/test-file.crt @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFbTCCA1WgAwIBAgIJAN338vEmMtLsMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNV +BAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYDVQQKDAxHb2xhbmcgVGVz +dHMxEjAQBgNVBAMMCXRlc3QtZmlsZTAeFw0xNzAyMDEyMzUyMDhaFw0yNzAxMzAy +MzUyMDhaME0xCzAJBgNVBAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYD +VQQKDAxHb2xhbmcgVGVzdHMxEjAQBgNVBAMMCXRlc3QtZmlsZTCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBAPMGiLjdiffQo3Xc8oUe7wsDhSaAJFOhO6Qs +i0xYrYl7jmCuz9rGD2fdgk5cLqGazKuQ6fIFzHXFU2BKs4CWXt9KO0KFEhfvZeuW +jG5d7C1ZUiuKOrPqjKVu8SZtFPc7y7Ke7msXzY+Z2LLyiJJ93LCMq4+cTSGNXVlI +KqUxhxeoD5/QkUPyQy/ilu3GMYfx/YORhDP6Edcuskfj8wRh1UxBejP8YPMvI6St +cE2GkxoEGqDWnQ/61F18te6WI3MD29tnKXOkXVhnSC+yvRLljotW2/tAhHKBG4tj +iQWT5Ri4Wrw2tXxPKRLsVWc7e1/hdxhnuvYpXkWNhKsm002jzkFXlzfEwPd8nZdw 
+5aT6gPUBN2AAzdoqZI7E200i0orEF7WaSoMfjU1tbHvExp3vyAPOfJ5PS2MQ6W03 +Zsy5dTVH+OBH++rkRzQCFcnIv/OIhya5XZ9KX9nFPgBEP7Xq2A+IjH7B6VN/S/bv +8lhp2V+SQvlew9GttKC4hKuPsl5o7+CMbcqcNUdxm9gGkN8epGEKCuix97bpNlxN +fHZxHE5+8GMzPXMkCD56y5TNKR6ut7JGHMPtGl5lPCLqzG/HzYyFgxsDfDUu2B0A +GKj0lGpnLfGqwhs2/s3jpY7+pcvVQxEpvVTId5byDxu1ujP4HjO/VTQ2P72rE8Ft +C6J2Av0tAgMBAAGjUDBOMB0GA1UdDgQWBBTLT/RbyfBB/Pa07oBnaM+QSJPO9TAf +BgNVHSMEGDAWgBTLT/RbyfBB/Pa07oBnaM+QSJPO9TAMBgNVHRMEBTADAQH/MA0G +CSqGSIb3DQEBCwUAA4ICAQB3sCntCcQwhMgRPPyvOCMyTcQ/Iv+cpfxz2Ck14nlx +AkEAH2CH0ov5GWTt07/ur3aa5x+SAKi0J3wTD1cdiw4U/6Uin6jWGKKxvoo4IaeK +SbM8w/6eKx6UbmHx7PA/eRABY9tTlpdPCVgw7/o3WDr03QM+IAtatzvaCPPczake +pbdLwmBZB/v8V+6jUajy6jOgdSH0PyffGnt7MWgDETmNC6p/Xigp5eh+C8Fb4NGT +xgHES5PBC+sruWp4u22bJGDKTvYNdZHsnw/CaKQWNsQqwisxa3/8N5v+PCff/pxl +r05pE3PdHn9JrCl4iWdVlgtiI9BoPtQyDfa/OEFaScE8KYR8LxaAgdgp3zYncWls +BpwQ6Y/A2wIkhlD9eEp5Ib2hz7isXOs9UwjdriKqrBXqcIAE5M+YIk3+KAQKxAtd +4YsK3CSJ010uphr12YKqlScj4vuKFjuOtd5RyyMIxUG3lrrhAu2AzCeKCLdVgA8+ +75FrYMApUdvcjp4uzbBoED4XRQlx9kdFHVbYgmE/+yddBYJM8u4YlgAL0hW2/D8p +z9JWIfxVmjJnBnXaKGBuiUyZ864A3PJndP6EMMo7TzS2CDnfCYuJjvI0KvDjFNmc +rQA04+qfMSEz3nmKhbbZu4eYLzlADhfH8tT4GMtXf71WLA5AUHGf2Y4+HIHTsmHG +vQ== +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/certificate-transparency-go/x509/verify.go b/vendor/github.com/google/certificate-transparency-go/x509/verify.go new file mode 100644 index 00000000000..07118c2bf65 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509/verify.go @@ -0,0 +1,1109 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/url" + "os" + "reflect" + "runtime" + "strings" + "time" + "unicode/utf8" +) + +// ignoreCN disables interpreting Common Name as a hostname. See issue 24151. +var ignoreCN = strings.Contains(os.Getenv("GODEBUG"), "x509ignoreCN=1") + +type InvalidReason int + +const ( + // NotAuthorizedToSign results when a certificate is signed by another + // which isn't marked as a CA certificate. + NotAuthorizedToSign InvalidReason = iota + // Expired results when a certificate has expired, based on the time + // given in the VerifyOptions. + Expired + // CANotAuthorizedForThisName results when an intermediate or root + // certificate has a name constraint which doesn't permit a DNS or + // other name (including IP address) in the leaf certificate. + CANotAuthorizedForThisName + // TooManyIntermediates results when a path length constraint is + // violated. + TooManyIntermediates + // IncompatibleUsage results when the certificate's key usage indicates + // that it may only be used for a different purpose. + IncompatibleUsage + // NameMismatch results when the subject name of a parent certificate + // does not match the issuer name in the child. + NameMismatch + // NameConstraintsWithoutSANs results when a leaf certificate doesn't + // contain a Subject Alternative Name extension, but a CA certificate + // contains name constraints, and the Common Name can be interpreted as + // a hostname. + // + // You can avoid this error by setting the experimental GODEBUG environment + // variable to "x509ignoreCN=1", disabling Common Name matching entirely. + // This behavior might become the default in the future. 
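+	// For example (command line assumed): launching a binary as
+	//   GODEBUG=x509ignoreCN=1 ./server
+	// flips the ignoreCN variable above and disables Common Name matching.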
+ NameConstraintsWithoutSANs + // UnconstrainedName results when a CA certificate contains permitted + // name constraints, but leaf certificate contains a name of an + // unsupported or unconstrained type. + UnconstrainedName + // TooManyConstraints results when the number of comparison operations + // needed to check a certificate exceeds the limit set by + // VerifyOptions.MaxConstraintComparisions. This limit exists to + // prevent pathological certificates can consuming excessive amounts of + // CPU time to verify. + TooManyConstraints + // CANotAuthorizedForExtKeyUsage results when an intermediate or root + // certificate does not permit a requested extended key usage. + CANotAuthorizedForExtKeyUsage +) + +// CertificateInvalidError results when an odd error occurs. Users of this +// library probably want to handle all these errors uniformly. +type CertificateInvalidError struct { + Cert *Certificate + Reason InvalidReason + Detail string +} + +func (e CertificateInvalidError) Error() string { + switch e.Reason { + case NotAuthorizedToSign: + return "x509: certificate is not authorized to sign other certificates" + case Expired: + return "x509: certificate has expired or is not yet valid: " + e.Detail + case CANotAuthorizedForThisName: + return "x509: a root or intermediate certificate is not authorized to sign for this name: " + e.Detail + case CANotAuthorizedForExtKeyUsage: + return "x509: a root or intermediate certificate is not authorized for an extended key usage: " + e.Detail + case TooManyIntermediates: + return "x509: too many intermediates for path length constraint" + case IncompatibleUsage: + return "x509: certificate specifies an incompatible key usage" + case NameMismatch: + return "x509: issuer name does not match subject from issuing certificate" + case NameConstraintsWithoutSANs: + return "x509: issuer has name constraints but leaf doesn't have a SAN extension" + case UnconstrainedName: + return "x509: issuer has name constraints but leaf contains unknown or unconstrained name: " + e.Detail + } + return "x509: unknown error" +} + +// HostnameError results when the set of authorized names doesn't match the +// requested name. +type HostnameError struct { + Certificate *Certificate + Host string +} + +func (h HostnameError) Error() string { + c := h.Certificate + + if !c.hasSANExtension() && !validHostname(c.Subject.CommonName) && + matchHostnames(toLowerCaseASCII(c.Subject.CommonName), toLowerCaseASCII(h.Host)) { + // This would have validated, if it weren't for the validHostname check on Common Name. + return "x509: Common Name is not a valid hostname: " + c.Subject.CommonName + } + + var valid string + if ip := net.ParseIP(h.Host); ip != nil { + // Trying to validate an IP + if len(c.IPAddresses) == 0 { + return "x509: cannot validate certificate for " + h.Host + " because it doesn't contain any IP SANs" + } + for _, san := range c.IPAddresses { + if len(valid) > 0 { + valid += ", " + } + valid += san.String() + } + } else { + if c.commonNameAsHostname() { + valid = c.Subject.CommonName + } else { + valid = strings.Join(c.DNSNames, ", ") + } + } + + if len(valid) == 0 { + return "x509: certificate is not valid for any names, but wanted to match " + h.Host + } + return "x509: certificate is valid for " + valid + ", not " + h.Host +} + +// UnknownAuthorityError results when the certificate issuer is unknown +type UnknownAuthorityError struct { + Cert *Certificate + // hintErr contains an error that may be helpful in determining why an + // authority wasn't found. 
+ hintErr error + // hintCert contains a possible authority certificate that was rejected + // because of the error in hintErr. + hintCert *Certificate +} + +func (e UnknownAuthorityError) Error() string { + s := "x509: certificate signed by unknown authority" + if e.hintErr != nil { + certName := e.hintCert.Subject.CommonName + if len(certName) == 0 { + if len(e.hintCert.Subject.Organization) > 0 { + certName = e.hintCert.Subject.Organization[0] + } else { + certName = "serial:" + e.hintCert.SerialNumber.String() + } + } + s += fmt.Sprintf(" (possibly because of %q while trying to verify candidate authority certificate %q)", e.hintErr, certName) + } + return s +} + +// SystemRootsError results when we fail to load the system root certificates. +type SystemRootsError struct { + Err error +} + +func (se SystemRootsError) Error() string { + msg := "x509: failed to load system roots and no roots provided" + if se.Err != nil { + return msg + "; " + se.Err.Error() + } + return msg +} + +// errNotParsed is returned when a certificate without ASN.1 contents is +// verified. Platform-specific verification needs the ASN.1 contents. +var errNotParsed = errors.New("x509: missing ASN.1 contents; use ParseCertificate") + +// VerifyOptions contains parameters for Certificate.Verify. It's a structure +// because other PKIX verification APIs have ended up needing many options. +type VerifyOptions struct { + DNSName string + Intermediates *CertPool + Roots *CertPool // if nil, the system roots are used + CurrentTime time.Time // if zero, the current time is used + // Options to disable various verification checks. + DisableTimeChecks bool + DisableCriticalExtensionChecks bool + DisableNameChecks bool + DisableEKUChecks bool + DisablePathLenChecks bool + DisableNameConstraintChecks bool + // KeyUsage specifies which Extended Key Usage values are acceptable. A leaf + // certificate is accepted if it contains any of the listed values. An empty + // list means ExtKeyUsageServerAuth. To accept any key usage, include + // ExtKeyUsageAny. + // + // Certificate chains are required to nest these extended key usage values. + // (This matches the Windows CryptoAPI behavior, but not the spec.) + KeyUsages []ExtKeyUsage + // MaxConstraintComparisions is the maximum number of comparisons to + // perform when checking a given certificate's name constraints. If + // zero, a sensible default is used. This limit prevents pathological + // certificates from consuming excessive amounts of CPU time when + // validating. + MaxConstraintComparisions int +} + +const ( + leafCertificate = iota + intermediateCertificate + rootCertificate +) + +// rfc2821Mailbox represents a “mailbox” (which is an email address to most +// people) by breaking it into the “local” (i.e. before the '@') and “domain” +// parts. +type rfc2821Mailbox struct { + local, domain string +} + +// parseRFC2821Mailbox parses an email address into local and domain parts, +// based on the ABNF for a “Mailbox” from RFC 2821. According to RFC 5280, +// Section 4.2.1.6 that's correct for an rfc822Name from a certificate: “The +// format of an rfc822Name is a "Mailbox" as defined in RFC 2821, Section 4.1.2”. 
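+// For illustration (addresses assumed): "gopher@example.com" parses to
+// {local: "gopher", domain: "example.com"}; a quoted local part such as
+// "\"gopher holes\"@example.com" is accepted; and "no-at-sign" is rejected
+// because the mandatory '@' is missing.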
+func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { + if len(in) == 0 { + return mailbox, false + } + + localPartBytes := make([]byte, 0, len(in)/2) + + if in[0] == '"' { + // Quoted-string = DQUOTE *qcontent DQUOTE + // non-whitespace-control = %d1-8 / %d11 / %d12 / %d14-31 / %d127 + // qcontent = qtext / quoted-pair + // qtext = non-whitespace-control / + // %d33 / %d35-91 / %d93-126 + // quoted-pair = ("\" text) / obs-qp + // text = %d1-9 / %d11 / %d12 / %d14-127 / obs-text + // + // (Names beginning with “obs-” are the obsolete syntax from RFC 2822, + // Section 4. Since it has been 16 years, we no longer accept that.) + in = in[1:] + QuotedString: + for { + if len(in) == 0 { + return mailbox, false + } + c := in[0] + in = in[1:] + + switch { + case c == '"': + break QuotedString + + case c == '\\': + // quoted-pair + if len(in) == 0 { + return mailbox, false + } + if in[0] == 11 || + in[0] == 12 || + (1 <= in[0] && in[0] <= 9) || + (14 <= in[0] && in[0] <= 127) { + localPartBytes = append(localPartBytes, in[0]) + in = in[1:] + } else { + return mailbox, false + } + + case c == 11 || + c == 12 || + // Space (char 32) is not allowed based on the + // BNF, but RFC 3696 gives an example that + // assumes that it is. Several “verified” + // errata continue to argue about this point. + // We choose to accept it. + c == 32 || + c == 33 || + c == 127 || + (1 <= c && c <= 8) || + (14 <= c && c <= 31) || + (35 <= c && c <= 91) || + (93 <= c && c <= 126): + // qtext + localPartBytes = append(localPartBytes, c) + + default: + return mailbox, false + } + } + } else { + // Atom ("." Atom)* + NextChar: + for len(in) > 0 { + // atext from RFC 2822, Section 3.2.4 + c := in[0] + + switch { + case c == '\\': + // Examples given in RFC 3696 suggest that + // escaped characters can appear outside of a + // quoted string. Several “verified” errata + // continue to argue the point. We choose to + // accept it. + in = in[1:] + if len(in) == 0 { + return mailbox, false + } + fallthrough + + case ('0' <= c && c <= '9') || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + c == '!' || c == '#' || c == '$' || c == '%' || + c == '&' || c == '\'' || c == '*' || c == '+' || + c == '-' || c == '/' || c == '=' || c == '?' || + c == '^' || c == '_' || c == '`' || c == '{' || + c == '|' || c == '}' || c == '~' || c == '.': + localPartBytes = append(localPartBytes, in[0]) + in = in[1:] + + default: + break NextChar + } + } + + if len(localPartBytes) == 0 { + return mailbox, false + } + + // From RFC 3696, Section 3: + // “period (".") may also appear, but may not be used to start + // or end the local part, nor may two or more consecutive + // periods appear.” + twoDots := []byte{'.', '.'} + if localPartBytes[0] == '.' || + localPartBytes[len(localPartBytes)-1] == '.' || + bytes.Contains(localPartBytes, twoDots) { + return mailbox, false + } + } + + if len(in) == 0 || in[0] != '@' { + return mailbox, false + } + in = in[1:] + + // The RFC species a format for domains, but that's known to be + // violated in practice so we accept that anything after an '@' is the + // domain part. + if _, ok := domainToReverseLabels(in); !ok { + return mailbox, false + } + + mailbox.local = string(localPartBytes) + mailbox.domain = in + return mailbox, true +} + +// domainToReverseLabels converts a textual domain name like foo.example.com to +// the list of labels in reverse order, e.g. ["com", "example", "foo"]. 
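+// Absolute names (with a trailing dot, e.g. "foo.example.com.") are
+// rejected, as are empty labels and labels containing bytes outside the
+// printable ASCII range 33..126.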
+func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) { + for len(domain) > 0 { + if i := strings.LastIndexByte(domain, '.'); i == -1 { + reverseLabels = append(reverseLabels, domain) + domain = "" + } else { + reverseLabels = append(reverseLabels, domain[i+1:]) + domain = domain[:i] + } + } + + if len(reverseLabels) > 0 && len(reverseLabels[0]) == 0 { + // An empty label at the end indicates an absolute value. + return nil, false + } + + for _, label := range reverseLabels { + if len(label) == 0 { + // Empty labels are otherwise invalid. + return nil, false + } + + for _, c := range label { + if c < 33 || c > 126 { + // Invalid character. + return nil, false + } + } + } + + return reverseLabels, true +} + +func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, error) { + // If the constraint contains an @, then it specifies an exact mailbox + // name. + if strings.Contains(constraint, "@") { + constraintMailbox, ok := parseRFC2821Mailbox(constraint) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse constraint %q", constraint) + } + return mailbox.local == constraintMailbox.local && strings.EqualFold(mailbox.domain, constraintMailbox.domain), nil + } + + // Otherwise the constraint is like a DNS constraint of the domain part + // of the mailbox. + return matchDomainConstraint(mailbox.domain, constraint) +} + +func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { + // From RFC 5280, Section 4.2.1.10: + // “a uniformResourceIdentifier that does not include an authority + // component with a host name specified as a fully qualified domain + // name (e.g., if the URI either does not include an authority + // component or includes an authority component in which the host name + // is specified as an IP address), then the application MUST reject the + // certificate.” + + host := uri.Host + if len(host) == 0 { + return false, fmt.Errorf("URI with empty host (%q) cannot be matched against constraints", uri.String()) + } + + if strings.Contains(host, ":") && !strings.HasSuffix(host, "]") { + var err error + host, _, err = net.SplitHostPort(uri.Host) + if err != nil { + return false, err + } + } + + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") || + net.ParseIP(host) != nil { + return false, fmt.Errorf("URI with IP (%q) cannot be matched against constraints", uri.String()) + } + + return matchDomainConstraint(host, constraint) +} + +func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) { + if len(ip) != len(constraint.IP) { + return false, nil + } + + for i := range ip { + if mask := constraint.Mask[i]; ip[i]&mask != constraint.IP[i]&mask { + return false, nil + } + } + + return true, nil +} + +func matchDomainConstraint(domain, constraint string) (bool, error) { + // The meaning of zero length constraints is not specified, but this + // code follows NSS and accepts them as matching everything. + if len(constraint) == 0 { + return true, nil + } + + domainLabels, ok := domainToReverseLabels(domain) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain) + } + + // RFC 5280 says that a leading period in a domain name means that at + // least one label must be prepended, but only for URI and email + // constraints, not DNS constraints. The code also supports that + // behaviour for DNS constraints. + + mustHaveSubdomains := false + if constraint[0] == '.' 
{ + mustHaveSubdomains = true + constraint = constraint[1:] + } + + constraintLabels, ok := domainToReverseLabels(constraint) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint) + } + + if len(domainLabels) < len(constraintLabels) || + (mustHaveSubdomains && len(domainLabels) == len(constraintLabels)) { + return false, nil + } + + for i, constraintLabel := range constraintLabels { + if !strings.EqualFold(constraintLabel, domainLabels[i]) { + return false, nil + } + } + + return true, nil +} + +// checkNameConstraints checks that c permits a child certificate to claim the +// given name, of type nameType. The argument parsedName contains the parsed +// form of name, suitable for passing to the match function. The total number +// of comparisons is tracked in the given count and should not exceed the given +// limit. +func (c *Certificate) checkNameConstraints(count *int, + maxConstraintComparisons int, + nameType string, + name string, + parsedName interface{}, + match func(parsedName, constraint interface{}) (match bool, err error), + permitted, excluded interface{}) error { + + excludedValue := reflect.ValueOf(excluded) + + *count += excludedValue.Len() + if *count > maxConstraintComparisons { + return CertificateInvalidError{c, TooManyConstraints, ""} + } + + for i := 0; i < excludedValue.Len(); i++ { + constraint := excludedValue.Index(i).Interface() + match, err := match(parsedName, constraint) + if err != nil { + return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()} + } + + if match { + return CertificateInvalidError{c, CANotAuthorizedForThisName, fmt.Sprintf("%s %q is excluded by constraint %q", nameType, name, constraint)} + } + } + + permittedValue := reflect.ValueOf(permitted) + + *count += permittedValue.Len() + if *count > maxConstraintComparisons { + return CertificateInvalidError{c, TooManyConstraints, ""} + } + + ok := true + for i := 0; i < permittedValue.Len(); i++ { + constraint := permittedValue.Index(i).Interface() + + var err error + if ok, err = match(parsedName, constraint); err != nil { + return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()} + } + + if ok { + break + } + } + + if !ok { + return CertificateInvalidError{c, CANotAuthorizedForThisName, fmt.Sprintf("%s %q is not permitted by any constraint", nameType, name)} + } + + return nil +} + +// isValid performs validity checks on c given that it is a candidate to append +// to the chain in currentChain. 
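+// Unless the corresponding VerifyOptions flags disable them, the checks
+// cover unhandled critical extensions, issuer/subject chaining, the
+// NotBefore/NotAfter validity window, and RFC 5280 name constraints on CA
+// certificates against the leaf's SANs.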
+func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error { + if !opts.DisableCriticalExtensionChecks && len(c.UnhandledCriticalExtensions) > 0 { + return UnhandledCriticalExtension{ID: c.UnhandledCriticalExtensions[0]} + } + + if !opts.DisableNameChecks && len(currentChain) > 0 { + child := currentChain[len(currentChain)-1] + if !bytes.Equal(child.RawIssuer, c.RawSubject) { + return CertificateInvalidError{c, NameMismatch, ""} + } + } + + if !opts.DisableTimeChecks { + now := opts.CurrentTime + if now.IsZero() { + now = time.Now() + } + if now.Before(c.NotBefore) { + return CertificateInvalidError{ + Cert: c, + Reason: Expired, + Detail: fmt.Sprintf("current time %s is before %s", now.Format(time.RFC3339), c.NotBefore.Format(time.RFC3339)), + } + } else if now.After(c.NotAfter) { + return CertificateInvalidError{ + Cert: c, + Reason: Expired, + Detail: fmt.Sprintf("current time %s is after %s", now.Format(time.RFC3339), c.NotAfter.Format(time.RFC3339)), + } + } + } + + maxConstraintComparisons := opts.MaxConstraintComparisions + if maxConstraintComparisons == 0 { + maxConstraintComparisons = 250000 + } + comparisonCount := 0 + + var leaf *Certificate + if certType == intermediateCertificate || certType == rootCertificate { + if len(currentChain) == 0 { + return errors.New("x509: internal error: empty chain when appending CA cert") + } + leaf = currentChain[0] + } + + checkNameConstraints := !opts.DisableNameConstraintChecks && (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints() + if checkNameConstraints && leaf.commonNameAsHostname() { + // This is the deprecated, legacy case of depending on the commonName as + // a hostname. We don't enforce name constraints against the CN, but + // VerifyHostname will look for hostnames in there if there are no SANs. + // In order to ensure VerifyHostname will not accept an unchecked name, + // return an error here. 
+ return CertificateInvalidError{c, NameConstraintsWithoutSANs, ""} + } else if checkNameConstraints && leaf.hasSANExtension() { + err := forEachSAN(leaf.getSANExtension(), func(tag int, data []byte) error { + switch tag { + case nameTypeEmail: + name := string(data) + mailbox, ok := parseRFC2821Mailbox(name) + if !ok { + return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox) + } + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox, + func(parsedName, constraint interface{}) (bool, error) { + return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string)) + }, c.PermittedEmailAddresses, c.ExcludedEmailAddresses); err != nil { + return err + } + + case nameTypeDNS: + name := string(data) + if _, ok := domainToReverseLabels(name); !ok { + return fmt.Errorf("x509: cannot parse dnsName %q", name) + } + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name, + func(parsedName, constraint interface{}) (bool, error) { + return matchDomainConstraint(parsedName.(string), constraint.(string)) + }, c.PermittedDNSDomains, c.ExcludedDNSDomains); err != nil { + return err + } + + case nameTypeURI: + name := string(data) + uri, err := url.Parse(name) + if err != nil { + return fmt.Errorf("x509: internal error: URI SAN %q failed to parse", name) + } + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "URI", name, uri, + func(parsedName, constraint interface{}) (bool, error) { + return matchURIConstraint(parsedName.(*url.URL), constraint.(string)) + }, c.PermittedURIDomains, c.ExcludedURIDomains); err != nil { + return err + } + + case nameTypeIP: + ip := net.IP(data) + if l := len(ip); l != net.IPv4len && l != net.IPv6len { + return fmt.Errorf("x509: internal error: IP SAN %x failed to parse", data) + } + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "IP address", ip.String(), ip, + func(parsedName, constraint interface{}) (bool, error) { + return matchIPConstraint(parsedName.(net.IP), constraint.(*net.IPNet)) + }, c.PermittedIPRanges, c.ExcludedIPRanges); err != nil { + return err + } + + default: + // Unknown SAN types are ignored. + } + + return nil + }) + + if err != nil { + return err + } + } + + // KeyUsage status flags are ignored. From Engineering Security, Peter + // Gutmann: A European government CA marked its signing certificates as + // being valid for encryption only, but no-one noticed. Another + // European CA marked its signature keys as not being valid for + // signatures. A different CA marked its own trusted root certificate + // as being invalid for certificate signing. Another national CA + // distributed a certificate to be used to encrypt data for the + // country’s tax authority that was marked as only being usable for + // digital signatures but not for encryption. Yet another CA reversed + // the order of the bit flags in the keyUsage due to confusion over + // encoding endianness, essentially setting a random keyUsage in + // certificates that it issued. Another CA created a self-invalidating + // certificate by adding a certificate policy statement stipulating + // that the certificate had to be used strictly as specified in the + // keyUsage, and a keyUsage containing a flag indicating that the RSA + // encryption key could only be used for Diffie-Hellman key agreement. 
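+	//
+	// Editorial sketch (not part of the upstream code): the checks below
+	// enforce BasicConstraints instead. For instance, a CA certificate built
+	// from the hypothetical template
+	//
+	//	tmpl := Certificate{BasicConstraintsValid: true, IsCA: true,
+	//		MaxPathLen: 0, MaxPathLenZero: true}
+	//
+	// may appear in a chain only directly above the leaf: as soon as
+	// len(currentChain)-1 exceeds MaxPathLen, isValid rejects it with
+	// TooManyIntermediates.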
+
+	if certType == intermediateCertificate && (!c.BasicConstraintsValid || !c.IsCA) {
+		return CertificateInvalidError{c, NotAuthorizedToSign, ""}
+	}
+
+	if !opts.DisablePathLenChecks && c.BasicConstraintsValid && c.MaxPathLen >= 0 {
+		numIntermediates := len(currentChain) - 1
+		if numIntermediates > c.MaxPathLen {
+			return CertificateInvalidError{c, TooManyIntermediates, ""}
+		}
+	}
+
+	return nil
+}
+
+// Verify attempts to verify c by building one or more chains from c to a
+// certificate in opts.Roots, using certificates in opts.Intermediates if
+// needed. If successful, it returns one or more chains where the first
+// element of the chain is c and the last element is from opts.Roots.
+//
+// If opts.Roots is nil and system roots are unavailable the returned error
+// will be of type SystemRootsError.
+//
+// Name constraints in the intermediates will be applied to all names claimed
+// in the chain, not just opts.DNSName. Thus it is invalid for a leaf to claim
+// example.com if an intermediate doesn't permit it, even if example.com is not
+// the name being validated. Note that DirectoryName constraints are not
+// supported.
+//
+// Extended Key Usage values are enforced down a chain, so an intermediate or
+// root that enumerates EKUs prevents a leaf from asserting an EKU not in that
+// list.
+//
+// WARNING: this function doesn't do any revocation checking.
+func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) {
+	// Platform-specific verification needs the ASN.1 contents so
+	// this makes the behavior consistent across platforms.
+	if len(c.Raw) == 0 {
+		return nil, errNotParsed
+	}
+	if opts.Intermediates != nil {
+		for _, intermediate := range opts.Intermediates.certs {
+			if len(intermediate.Raw) == 0 {
+				return nil, errNotParsed
+			}
+		}
+	}
+
+	// Use Windows's own verification and chain building.
+	if opts.Roots == nil && runtime.GOOS == "windows" {
+		return c.systemVerify(&opts)
+	}
+
+	if opts.Roots == nil {
+		opts.Roots = systemRootsPool()
+		if opts.Roots == nil {
+			return nil, SystemRootsError{systemRootsErr}
+		}
+	}
+
+	err = c.isValid(leafCertificate, nil, &opts)
+	if err != nil {
+		return
+	}
+
+	if len(opts.DNSName) > 0 {
+		err = c.VerifyHostname(opts.DNSName)
+		if err != nil {
+			return
+		}
+	}
+
+	var candidateChains [][]*Certificate
+	if opts.Roots.contains(c) {
+		candidateChains = append(candidateChains, []*Certificate{c})
+	} else {
+		if candidateChains, err = c.buildChains(nil, []*Certificate{c}, nil, &opts); err != nil {
+			return nil, err
+		}
+	}
+
+	keyUsages := opts.KeyUsages
+	if len(keyUsages) == 0 {
+		keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
+	}
+
+	// If any key usage is acceptable then we're done.
+	for _, usage := range keyUsages {
+		if usage == ExtKeyUsageAny {
+			return candidateChains, nil
+		}
+	}
+
+	for _, candidate := range candidateChains {
+		if opts.DisableEKUChecks || checkChainForKeyUsage(candidate, keyUsages) {
+			chains = append(chains, candidate)
+		}
+	}
+
+	if len(chains) == 0 {
+		return nil, CertificateInvalidError{c, IncompatibleUsage, ""}
+	}
+
+	return chains, nil
+}
+
+func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate {
+	n := make([]*Certificate, len(chain)+1)
+	copy(n, chain)
+	n[len(chain)] = cert
+	return n
+}
+
+// maxChainSignatureChecks is the maximum number of CheckSignatureFrom calls
+// that an invocation of buildChains will (transitively) make.
Most chains are +// less than 15 certificates long, so this leaves space for multiple chains and +// for failed checks due to different intermediates having the same Subject. +const maxChainSignatureChecks = 100 + +func (c *Certificate) buildChains(cache map[*Certificate][][]*Certificate, currentChain []*Certificate, sigChecks *int, opts *VerifyOptions) (chains [][]*Certificate, err error) { + var ( + hintErr error + hintCert *Certificate + ) + + considerCandidate := func(certType int, candidate *Certificate) { + for _, cert := range currentChain { + if cert.Equal(candidate) { + return + } + } + + if sigChecks == nil { + sigChecks = new(int) + } + *sigChecks++ + if *sigChecks > maxChainSignatureChecks { + err = errors.New("x509: signature check attempts limit reached while verifying certificate chain") + return + } + + if err := c.CheckSignatureFrom(candidate); err != nil { + if hintErr == nil { + hintErr = err + hintCert = candidate + } + return + } + + err = candidate.isValid(certType, currentChain, opts) + if err != nil { + return + } + + switch certType { + case rootCertificate: + chains = append(chains, appendToFreshChain(currentChain, candidate)) + case intermediateCertificate: + if cache == nil { + cache = make(map[*Certificate][][]*Certificate) + } + childChains, ok := cache[candidate] + if !ok { + childChains, err = candidate.buildChains(cache, appendToFreshChain(currentChain, candidate), sigChecks, opts) + cache[candidate] = childChains + } + chains = append(chains, childChains...) + } + } + + for _, rootNum := range opts.Roots.findPotentialParents(c) { + considerCandidate(rootCertificate, opts.Roots.certs[rootNum]) + } + for _, intermediateNum := range opts.Intermediates.findPotentialParents(c) { + considerCandidate(intermediateCertificate, opts.Intermediates.certs[intermediateNum]) + } + + if len(chains) > 0 { + err = nil + } + if len(chains) == 0 && err == nil { + err = UnknownAuthorityError{c, hintErr, hintCert} + } + + return +} + +// validHostname reports whether host is a valid hostname that can be matched or +// matched against according to RFC 6125 2.2, with some leniency to accommodate +// legacy values. +func validHostname(host string) bool { + host = strings.TrimSuffix(host, ".") + + if len(host) == 0 { + return false + } + + for i, part := range strings.Split(host, ".") { + if part == "" { + // Empty label. + return false + } + if i == 0 && part == "*" { + // Only allow full left-most wildcards, as those are the only ones + // we match, and matching literal '*' characters is probably never + // the expected behavior. + continue + } + for j, c := range part { + if 'a' <= c && c <= 'z' { + continue + } + if '0' <= c && c <= '9' { + continue + } + if 'A' <= c && c <= 'Z' { + continue + } + if c == '-' && j != 0 { + continue + } + if c == '_' || c == ':' { + // Not valid characters in hostnames, but commonly + // found in deployments outside the WebPKI. + continue + } + return false + } + } + + return true +} + +// commonNameAsHostname reports whether the Common Name field should be +// considered the hostname that the certificate is valid for. This is a legacy +// behavior, disabled if the Subject Alt Name extension is present. +// +// It applies the strict validHostname check to the Common Name field, so that +// certificates without SANs can still be validated against CAs with name +// constraints if there is no risk the CN would be matched as a hostname. +// See NameConstraintsWithoutSANs and issue 24151. 
+func (c *Certificate) commonNameAsHostname() bool { + return !ignoreCN && !c.hasSANExtension() && validHostname(c.Subject.CommonName) +} + +func matchHostnames(pattern, host string) bool { + host = strings.TrimSuffix(host, ".") + pattern = strings.TrimSuffix(pattern, ".") + + if len(pattern) == 0 || len(host) == 0 { + return false + } + + patternParts := strings.Split(pattern, ".") + hostParts := strings.Split(host, ".") + + if len(patternParts) != len(hostParts) { + return false + } + + for i, patternPart := range patternParts { + if i == 0 && patternPart == "*" { + continue + } + if patternPart != hostParts[i] { + return false + } + } + + return true +} + +// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use +// an explicitly ASCII function to avoid any sharp corners resulting from +// performing Unicode operations on DNS labels. +func toLowerCaseASCII(in string) string { + // If the string is already lower-case then there's nothing to do. + isAlreadyLowerCase := true + for _, c := range in { + if c == utf8.RuneError { + // If we get a UTF-8 error then there might be + // upper-case ASCII bytes in the invalid sequence. + isAlreadyLowerCase = false + break + } + if 'A' <= c && c <= 'Z' { + isAlreadyLowerCase = false + break + } + } + + if isAlreadyLowerCase { + return in + } + + out := []byte(in) + for i, c := range out { + if 'A' <= c && c <= 'Z' { + out[i] += 'a' - 'A' + } + } + return string(out) +} + +// VerifyHostname returns nil if c is a valid certificate for the named host. +// Otherwise it returns an error describing the mismatch. +func (c *Certificate) VerifyHostname(h string) error { + // IP addresses may be written in [ ]. + candidateIP := h + if len(h) >= 3 && h[0] == '[' && h[len(h)-1] == ']' { + candidateIP = h[1 : len(h)-1] + } + if ip := net.ParseIP(candidateIP); ip != nil { + // We only match IP addresses against IP SANs. + // See RFC 6125, Appendix B.2. + for _, candidate := range c.IPAddresses { + if ip.Equal(candidate) { + return nil + } + } + return HostnameError{c, candidateIP} + } + + lowered := toLowerCaseASCII(h) + + if c.commonNameAsHostname() { + if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) { + return nil + } + } else { + for _, match := range c.DNSNames { + if matchHostnames(toLowerCaseASCII(match), lowered) { + return nil + } + } + } + + return HostnameError{c, h} +} + +func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool { + usages := make([]ExtKeyUsage, len(keyUsages)) + copy(usages, keyUsages) + + if len(chain) == 0 { + return false + } + + usagesRemaining := len(usages) + + // We walk down the list and cross out any usages that aren't supported + // by each certificate. If we cross out all the usages, then the chain + // is unacceptable. + +NextCert: + for i := len(chain) - 1; i >= 0; i-- { + cert := chain[i] + if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 { + // The certificate doesn't have any extended key usage specified. + continue + } + + for _, usage := range cert.ExtKeyUsage { + if usage == ExtKeyUsageAny { + // The certificate is explicitly good for any usage. 
+				continue NextCert
+			}
+		}
+
+		const invalidUsage ExtKeyUsage = -1
+
+	NextRequestedUsage:
+		for i, requestedUsage := range usages {
+			if requestedUsage == invalidUsage {
+				continue
+			}
+
+			for _, usage := range cert.ExtKeyUsage {
+				if requestedUsage == usage {
+					continue NextRequestedUsage
+				} else if requestedUsage == ExtKeyUsageServerAuth &&
+					(usage == ExtKeyUsageNetscapeServerGatedCrypto ||
+						usage == ExtKeyUsageMicrosoftServerGatedCrypto) {
+					// In order to support COMODO
+					// certificate chains, we have to
+					// accept Netscape or Microsoft SGC
+					// usages as equal to ServerAuth.
+					continue NextRequestedUsage
+				}
+			}
+
+			usages[i] = invalidUsage
+			usagesRemaining--
+			if usagesRemaining == 0 {
+				return false
+			}
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/x509.go b/vendor/github.com/google/certificate-transparency-go/x509/x509.go
new file mode 100644
index 00000000000..917d78779f3
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/x509.go
@@ -0,0 +1,3307 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package x509 parses X.509-encoded keys and certificates.
+//
+// On UNIX systems the environment variables SSL_CERT_FILE and SSL_CERT_DIR
+// can be used to override the system default locations for the SSL certificate
+// file and SSL certificate files directory, respectively.
+//
+// This is a fork of the Go library crypto/x509 package, primarily adapted for
+// use with Certificate Transparency. Main areas of difference are:
+//
+// Life as a fork:
+//  - Rename OS-specific cgo code so it doesn't clash with main Go library.
+//  - Use local library imports (asn1, pkix) throughout.
+//  - Add version-specific wrappers for Go version-incompatible code (in
+//    ptr_*_windows.go).
+// Laxer certificate parsing:
+//  - Add options to disable various validation checks (times, EKUs etc).
+//  - Use NonFatalErrors type for some errors and continue parsing; this
+//    can be checked with IsFatal(err).
+//  - Support for short bitlength ECDSA curves (in curves.go).
+// Certificate Transparency specific function:
+//  - Parsing and marshaling of SCTList extension.
+//  - RemoveSCTList() function for rebuilding CT leaf entry.
+//  - Pre-certificate processing (RemoveCTPoison(), BuildPrecertTBS(),
+//    ParseTBSCertificate(), IsPrecertificate()).
+// Revocation list processing:
+//  - Detailed CRL parsing (in revoked.go)
+//  - Detailed error recording mechanism (in error.go, errors.go)
+//  - Factor out parseDistributionPoints() for reuse.
+//  - Factor out and generalize GeneralNames parsing (in names.go)
+//  - Fix CRL commenting.
+// RPKI support:
+//  - Support for SubjectInfoAccess extension
+//  - Support for RFC3779 extensions (in rpki.go)
+// RSAES-OAEP support:
+//  - Support for parsing RSAES-OAEP public keys from certificates
+// Ed25519 support:
+//  - Support for parsing and marshaling Ed25519 keys
+// General improvements:
+//  - Export and use OID values throughout.
+//  - Export OIDFromNamedCurve().
+//  - Export SignatureAlgorithmFromAI().
+//  - Add OID value to UnhandledCriticalExtension error.
+//  - Minor typo/lint fixes.
+package x509 + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "net" + "net/url" + "strconv" + "strings" + "time" + "unicode/utf8" + + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" + "golang.org/x/crypto/ed25519" + + "github.com/google/certificate-transparency-go/asn1" + "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/x509/pkix" +) + +// pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo +// in RFC 3280. +type pkixPublicKey struct { + Algo pkix.AlgorithmIdentifier + BitString asn1.BitString +} + +// ParsePKIXPublicKey parses a public key in PKIX, ASN.1 DER form. +// +// It returns a *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, or +// ed25519.PublicKey. More types might be supported in the future. +// +// This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY". +func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) { + var pki publicKeyInfo + if rest, err := asn1.Unmarshal(derBytes, &pki); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after ASN.1 of public-key") + } + algo := getPublicKeyAlgorithmFromOID(pki.Algorithm.Algorithm) + if algo == UnknownPublicKeyAlgorithm { + return nil, errors.New("x509: unknown public key algorithm") + } + var nfe NonFatalErrors + pub, err = parsePublicKey(algo, &pki, &nfe) + if err != nil { + return pub, err + } + // Treat non-fatal errors as fatal for this entrypoint. + if len(nfe.Errors) > 0 { + return nil, nfe.Errors[0] + } + return pub, nil +} + +func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) { + switch pub := pub.(type) { + case *rsa.PublicKey: + publicKeyBytes, err = asn1.Marshal(pkcs1PublicKey{ + N: pub.N, + E: pub.E, + }) + if err != nil { + return nil, pkix.AlgorithmIdentifier{}, err + } + publicKeyAlgorithm.Algorithm = OIDPublicKeyRSA + // This is a NULL parameters value which is required by + // RFC 3279, Section 2.3.1. + publicKeyAlgorithm.Parameters = asn1.NullRawValue + case *ecdsa.PublicKey: + publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) + oid, ok := OIDFromNamedCurve(pub.Curve) + if !ok { + return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: unsupported elliptic curve") + } + publicKeyAlgorithm.Algorithm = OIDPublicKeyECDSA + var paramBytes []byte + paramBytes, err = asn1.Marshal(oid) + if err != nil { + return + } + publicKeyAlgorithm.Parameters.FullBytes = paramBytes + case ed25519.PublicKey: + publicKeyBytes = pub + publicKeyAlgorithm.Algorithm = OIDPublicKeyEd25519 + default: + return nil, pkix.AlgorithmIdentifier{}, fmt.Errorf("x509: unsupported public key type: %T", pub) + } + + return publicKeyBytes, publicKeyAlgorithm, nil +} + +// MarshalPKIXPublicKey converts a public key to PKIX, ASN.1 DER form. +// +// The following key types are currently supported: *rsa.PublicKey, *ecdsa.PublicKey +// and ed25519.PublicKey. Unsupported key types result in an error. +// +// This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY". 
+func MarshalPKIXPublicKey(pub interface{}) ([]byte, error) {
+	var publicKeyBytes []byte
+	var publicKeyAlgorithm pkix.AlgorithmIdentifier
+	var err error
+
+	if publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(pub); err != nil {
+		return nil, err
+	}
+
+	pkix := pkixPublicKey{
+		Algo: publicKeyAlgorithm,
+		BitString: asn1.BitString{
+			Bytes:     publicKeyBytes,
+			BitLength: 8 * len(publicKeyBytes),
+		},
+	}
+
+	ret, _ := asn1.Marshal(pkix)
+	return ret, nil
+}
+
+// These structures reflect the ASN.1 structure of X.509 certificates:
+
+type certificate struct {
+	Raw                asn1.RawContent
+	TBSCertificate     tbsCertificate
+	SignatureAlgorithm pkix.AlgorithmIdentifier
+	SignatureValue     asn1.BitString
+}
+
+type tbsCertificate struct {
+	Raw                asn1.RawContent
+	Version            int `asn1:"optional,explicit,default:0,tag:0"`
+	SerialNumber       *big.Int
+	SignatureAlgorithm pkix.AlgorithmIdentifier
+	Issuer             asn1.RawValue
+	Validity           validity
+	Subject            asn1.RawValue
+	PublicKey          publicKeyInfo
+	UniqueId           asn1.BitString   `asn1:"optional,tag:1"`
+	SubjectUniqueId    asn1.BitString   `asn1:"optional,tag:2"`
+	Extensions         []pkix.Extension `asn1:"optional,explicit,tag:3"`
+}
+
+// RFC 4055, 4.1
+// The current ASN.1 parser does not support non-integer defaults so
+// the 'default:' tags here do nothing.
+type rsaesoaepAlgorithmParameters struct {
+	HashFunc    pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:0,default:sha1Identifier"`
+	MaskgenFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:1,default:mgf1SHA1Identifier"`
+	PSourceFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:2,default:pSpecifiedEmptyIdentifier"`
+}
+
+type dsaAlgorithmParameters struct {
+	P, Q, G *big.Int
+}
+
+type dsaSignature struct {
+	R, S *big.Int
+}
+
+type ecdsaSignature dsaSignature
+
+type validity struct {
+	NotBefore, NotAfter time.Time
+}
+
+type publicKeyInfo struct {
+	Raw       asn1.RawContent
+	Algorithm pkix.AlgorithmIdentifier
+	PublicKey asn1.BitString
+}
+
+// RFC 5280, 4.2.1.1
+type authKeyId struct {
+	Id []byte `asn1:"optional,tag:0"`
+}
+
+// SignatureAlgorithm indicates the algorithm used to sign a certificate.
+type SignatureAlgorithm int
+
+// SignatureAlgorithm values:
+const (
+	UnknownSignatureAlgorithm SignatureAlgorithm = iota
+	MD2WithRSA
+	MD5WithRSA
+	SHA1WithRSA
+	SHA256WithRSA
+	SHA384WithRSA
+	SHA512WithRSA
+	DSAWithSHA1
+	DSAWithSHA256
+	ECDSAWithSHA1
+	ECDSAWithSHA256
+	ECDSAWithSHA384
+	ECDSAWithSHA512
+	SHA256WithRSAPSS
+	SHA384WithRSAPSS
+	SHA512WithRSAPSS
+	PureEd25519
+)
+
+// RFC 4055, 6. Basic object identifiers
+var oidpSpecified = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 9}
+
+// These are the default parameters for an RSAES-OAEP pubkey.
+// The current ASN.1 parser does not support non-integer defaults so
+// these currently do nothing.
+var ( + sha1Identifier = pkix.AlgorithmIdentifier{ + Algorithm: oidSHA1, + Parameters: asn1.NullRawValue, + } + mgf1SHA1Identifier = pkix.AlgorithmIdentifier{ + Algorithm: oidMGF1, + // RFC 4055, 2.1 sha1Identifier + Parameters: asn1.RawValue{ + Class: asn1.ClassUniversal, + Tag: asn1.TagSequence, + IsCompound: false, + Bytes: []byte{6, 5, 43, 14, 3, 2, 26, 5, 0}, + FullBytes: []byte{16, 9, 6, 5, 43, 14, 3, 2, 26, 5, 0}}, + } + pSpecifiedEmptyIdentifier = pkix.AlgorithmIdentifier{ + Algorithm: oidpSpecified, + // RFC 4055, 4.1 nullOctetString + Parameters: asn1.RawValue{ + Class: asn1.ClassUniversal, + Tag: asn1.TagOctetString, + IsCompound: false, + Bytes: []byte{}, + FullBytes: []byte{4, 0}}, + } +) + +func (algo SignatureAlgorithm) isRSAPSS() bool { + switch algo { + case SHA256WithRSAPSS, SHA384WithRSAPSS, SHA512WithRSAPSS: + return true + default: + return false + } +} + +func (algo SignatureAlgorithm) String() string { + for _, details := range signatureAlgorithmDetails { + if details.algo == algo { + return details.name + } + } + return strconv.Itoa(int(algo)) +} + +// PublicKeyAlgorithm indicates the algorithm used for a certificate's public key. +type PublicKeyAlgorithm int + +// PublicKeyAlgorithm values: +const ( + UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota + RSA + DSA + ECDSA + Ed25519 + RSAESOAEP +) + +var publicKeyAlgoName = [...]string{ + RSA: "RSA", + DSA: "DSA", + ECDSA: "ECDSA", + Ed25519: "Ed25519", + RSAESOAEP: "RSAESOAEP", +} + +func (algo PublicKeyAlgorithm) String() string { + if 0 < algo && int(algo) < len(publicKeyAlgoName) { + return publicKeyAlgoName[algo] + } + return strconv.Itoa(int(algo)) +} + +// OIDs for signature algorithms +// +// pkcs-1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 } +// +// +// RFC 3279 2.2.1 RSA Signature Algorithms +// +// md2WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 2 } +// +// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 } +// +// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 } +// +// dsaWithSha1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 } +// +// RFC 3279 2.2.3 ECDSA Signature Algorithm +// +// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-x962(10045) +// signatures(4) ecdsa-with-SHA1(1)} +// +// +// RFC 4055 5 PKCS #1 Version 1.5 +// +// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 } +// +// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 } +// +// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 } +// +// +// RFC 5758 3.1 DSA Signature Algorithms +// +// dsaWithSha256 OBJECT IDENTIFIER ::= { +// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101) +// csor(3) algorithms(4) id-dsa-with-sha2(3) 2} +// +// RFC 5758 3.2 ECDSA Signature Algorithm +// +// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 } +// +// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 } +// +// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 } +// +// +// RFC 8410 3 Curve25519 and Curve448 Algorithm Identifiers +// +// id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 } + +var ( + oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + 
oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} + oidSignatureEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112} + + oidSHA1 = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26} + oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} + oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} + oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} + + oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8} + + // oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA + // but it's specified by ISO. Microsoft's makecert.exe has been known + // to produce certificates with this OID. + oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29} +) + +var signatureAlgorithmDetails = []struct { + algo SignatureAlgorithm + name string + oid asn1.ObjectIdentifier + pubKeyAlgo PublicKeyAlgorithm + hash crypto.Hash +}{ + {MD2WithRSA, "MD2-RSA", oidSignatureMD2WithRSA, RSA, crypto.Hash(0) /* no value for MD2 */}, + {MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, RSA, crypto.MD5}, + {SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, RSA, crypto.SHA1}, + {SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, RSA, crypto.SHA1}, + {SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, RSA, crypto.SHA256}, + {SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, RSA, crypto.SHA384}, + {SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, RSA, crypto.SHA512}, + {SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA256}, + {SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA384}, + {SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA512}, + {DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, DSA, crypto.SHA1}, + {DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, DSA, crypto.SHA256}, + {ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, ECDSA, crypto.SHA1}, + {ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, ECDSA, crypto.SHA256}, + {ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, ECDSA, crypto.SHA384}, + {ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, ECDSA, crypto.SHA512}, + {PureEd25519, "Ed25519", oidSignatureEd25519, Ed25519, crypto.Hash(0) /* no pre-hashing */}, +} + +// pssParameters reflects the parameters in an AlgorithmIdentifier that +// specifies RSA PSS. See RFC 3447, Appendix A.2.3. +type pssParameters struct { + // The following three fields are not marked as + // optional because the default values specify SHA-1, + // which is no longer suitable for use in signatures. 
+	Hash         pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"`
+	MGF          pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"`
+	SaltLength   int                      `asn1:"explicit,tag:2"`
+	TrailerField int                      `asn1:"optional,explicit,tag:3,default:1"`
+}
+
+// rsaPSSParameters returns an asn1.RawValue suitable for use as the Parameters
+// in an AlgorithmIdentifier that specifies RSA PSS.
+func rsaPSSParameters(hashFunc crypto.Hash) asn1.RawValue {
+	var hashOID asn1.ObjectIdentifier
+
+	switch hashFunc {
+	case crypto.SHA256:
+		hashOID = oidSHA256
+	case crypto.SHA384:
+		hashOID = oidSHA384
+	case crypto.SHA512:
+		hashOID = oidSHA512
+	}
+
+	params := pssParameters{
+		Hash: pkix.AlgorithmIdentifier{
+			Algorithm:  hashOID,
+			Parameters: asn1.NullRawValue,
+		},
+		MGF: pkix.AlgorithmIdentifier{
+			Algorithm: oidMGF1,
+		},
+		SaltLength:   hashFunc.Size(),
+		TrailerField: 1,
+	}
+
+	mgf1Params := pkix.AlgorithmIdentifier{
+		Algorithm:  hashOID,
+		Parameters: asn1.NullRawValue,
+	}
+
+	var err error
+	params.MGF.Parameters.FullBytes, err = asn1.Marshal(mgf1Params)
+	if err != nil {
+		panic(err)
+	}
+
+	serialized, err := asn1.Marshal(params)
+	if err != nil {
+		panic(err)
+	}
+
+	return asn1.RawValue{FullBytes: serialized}
+}
+
+// SignatureAlgorithmFromAI converts a PKIX algorithm identifier to the
+// equivalent local constant.
+func SignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm {
+	if ai.Algorithm.Equal(oidSignatureEd25519) {
+		// RFC 8410, Section 3
+		// > For all of the OIDs, the parameters MUST be absent.
+		if len(ai.Parameters.FullBytes) != 0 {
+			return UnknownSignatureAlgorithm
+		}
+	}
+
+	if !ai.Algorithm.Equal(oidSignatureRSAPSS) {
+		for _, details := range signatureAlgorithmDetails {
+			if ai.Algorithm.Equal(details.oid) {
+				return details.algo
+			}
+		}
+		return UnknownSignatureAlgorithm
+	}
+
+	// RSA PSS is special because it encodes important parameters
+	// in the Parameters.
+
+	var params pssParameters
+	if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, &params); err != nil {
+		return UnknownSignatureAlgorithm
+	}
+
+	var mgf1HashFunc pkix.AlgorithmIdentifier
+	if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil {
+		return UnknownSignatureAlgorithm
+	}
+
+	// PSS is greatly overburdened with options. This code forces them into
+	// three buckets by requiring that the MGF1 hash function always match the
+	// message hash function (as recommended in RFC 3447, Section 8.1), that the
+	// salt length matches the hash length, and that the trailer field has the
+	// default value.
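+	//
+	// Editorial sketch (not upstream commentary): informally, the only
+	// SHA-256 shape accepted here is
+	//
+	//	hashAlgorithm    = sha256 (absent or NULL parameters)
+	//	maskGenAlgorithm = mgf1 with sha256 (absent or NULL parameters)
+	//	saltLength       = 32
+	//	trailerField     = 1
+	//
+	// which the switch below maps to SHA256WithRSAPSS; SHA-384 and SHA-512
+	// follow the same pattern with salt lengths 48 and 64.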
+ if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) || + !params.MGF.Algorithm.Equal(oidMGF1) || + !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) || + (len(mgf1HashFunc.Parameters.FullBytes) != 0 && !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes)) || + params.TrailerField != 1 { + return UnknownSignatureAlgorithm + } + + switch { + case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32: + return SHA256WithRSAPSS + case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48: + return SHA384WithRSAPSS + case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64: + return SHA512WithRSAPSS + } + + return UnknownSignatureAlgorithm +} + +// RFC 3279, 2.3 Public Key Algorithms +// +// pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) +// +// rsadsi(113549) pkcs(1) 1 } +// +// rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 } +// +// id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) +// +// x9-57(10040) x9cm(4) 1 } +// +// # RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters +// +// id-ecPublicKey OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 } +var ( + OIDPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + OIDPublicKeyRSAESOAEP = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 7} + OIDPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} + OIDPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} + OIDPublicKeyRSAObsolete = asn1.ObjectIdentifier{2, 5, 8, 1, 1} + OIDPublicKeyEd25519 = oidSignatureEd25519 +) + +func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm { + switch { + case oid.Equal(OIDPublicKeyRSA): + return RSA + case oid.Equal(OIDPublicKeyDSA): + return DSA + case oid.Equal(OIDPublicKeyECDSA): + return ECDSA + case oid.Equal(OIDPublicKeyRSAESOAEP): + return RSAESOAEP + case oid.Equal(OIDPublicKeyEd25519): + return Ed25519 + } + return UnknownPublicKeyAlgorithm +} + +// RFC 5480, 2.1.1.1. 
Named Curve +// +// secp224r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 33 } +// +// secp256r1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3) +// prime(1) 7 } +// +// secp384r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 34 } +// +// secp521r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 35 } +// +// secp192r1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3) +// prime(1) 1 } +// +// NB: secp256r1 is equivalent to prime256v1, +// secp192r1 is equivalent to ansix9p192r and prime192v1 +var ( + OIDNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} + OIDNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} + OIDNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} + OIDNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} + OIDNamedCurveP192 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 1} +) + +func namedCurveFromOID(oid asn1.ObjectIdentifier, nfe *NonFatalErrors) elliptic.Curve { + switch { + case oid.Equal(OIDNamedCurveP224): + return elliptic.P224() + case oid.Equal(OIDNamedCurveP256): + return elliptic.P256() + case oid.Equal(OIDNamedCurveP384): + return elliptic.P384() + case oid.Equal(OIDNamedCurveP521): + return elliptic.P521() + case oid.Equal(OIDNamedCurveP192): + nfe.AddError(errors.New("insecure curve (secp192r1) specified")) + return secp192r1() + } + return nil +} + +// OIDFromNamedCurve returns the OID used to specify the use of the given +// elliptic curve. +func OIDFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) { + switch curve { + case elliptic.P224(): + return OIDNamedCurveP224, true + case elliptic.P256(): + return OIDNamedCurveP256, true + case elliptic.P384(): + return OIDNamedCurveP384, true + case elliptic.P521(): + return OIDNamedCurveP521, true + case secp192r1(): + return OIDNamedCurveP192, true + } + + return nil, false +} + +// KeyUsage represents the set of actions that are valid for a given key. It's +// a bitmap of the KeyUsage* constants. 
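+//
+// For example (editorial note, not in the upstream source), a typical TLS
+// server key might carry
+//
+//	ku := KeyUsageDigitalSignature | KeyUsageKeyEncipherment
+//
+// and individual bits are tested by masking: ku&KeyUsageCertSign == 0 here.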
+type KeyUsage int + +// KeyUsage values: +const ( + KeyUsageDigitalSignature KeyUsage = 1 << iota + KeyUsageContentCommitment + KeyUsageKeyEncipherment + KeyUsageDataEncipherment + KeyUsageKeyAgreement + KeyUsageCertSign + KeyUsageCRLSign + KeyUsageEncipherOnly + KeyUsageDecipherOnly +) + +// RFC 5280, 4.2.1.12 Extended Key Usage +// +// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 } +// +// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } +// +// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } +// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } +// id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 } +// id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 } +// id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 } +// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } +var ( + oidExtKeyUsageAny = asn1.ObjectIdentifier{2, 5, 29, 37, 0} + oidExtKeyUsageServerAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1} + oidExtKeyUsageClientAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2} + oidExtKeyUsageCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3} + oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4} + oidExtKeyUsageIPSECEndSystem = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5} + oidExtKeyUsageIPSECTunnel = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6} + oidExtKeyUsageIPSECUser = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7} + oidExtKeyUsageTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8} + oidExtKeyUsageOCSPSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9} + oidExtKeyUsageMicrosoftServerGatedCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3} + oidExtKeyUsageNetscapeServerGatedCrypto = asn1.ObjectIdentifier{2, 16, 840, 1, 113730, 4, 1} + oidExtKeyUsageMicrosoftCommercialCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 2, 1, 22} + oidExtKeyUsageMicrosoftKernelCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 61, 1, 1} + // RFC 6962 s3.1 + oidExtKeyUsageCertificateTransparency = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 4} +) + +// ExtKeyUsage represents an extended set of actions that are valid for a given key. +// Each of the ExtKeyUsage* constants define a unique action. +type ExtKeyUsage int + +// ExtKeyUsage values: +const ( + ExtKeyUsageAny ExtKeyUsage = iota + ExtKeyUsageServerAuth + ExtKeyUsageClientAuth + ExtKeyUsageCodeSigning + ExtKeyUsageEmailProtection + ExtKeyUsageIPSECEndSystem + ExtKeyUsageIPSECTunnel + ExtKeyUsageIPSECUser + ExtKeyUsageTimeStamping + ExtKeyUsageOCSPSigning + ExtKeyUsageMicrosoftServerGatedCrypto + ExtKeyUsageNetscapeServerGatedCrypto + ExtKeyUsageMicrosoftCommercialCodeSigning + ExtKeyUsageMicrosoftKernelCodeSigning + ExtKeyUsageCertificateTransparency +) + +// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID. 
+var extKeyUsageOIDs = []struct {
+	extKeyUsage ExtKeyUsage
+	oid         asn1.ObjectIdentifier
+}{
+	{ExtKeyUsageAny, oidExtKeyUsageAny},
+	{ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth},
+	{ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth},
+	{ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning},
+	{ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection},
+	{ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem},
+	{ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel},
+	{ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser},
+	{ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping},
+	{ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning},
+	{ExtKeyUsageMicrosoftServerGatedCrypto, oidExtKeyUsageMicrosoftServerGatedCrypto},
+	{ExtKeyUsageNetscapeServerGatedCrypto, oidExtKeyUsageNetscapeServerGatedCrypto},
+	{ExtKeyUsageMicrosoftCommercialCodeSigning, oidExtKeyUsageMicrosoftCommercialCodeSigning},
+	{ExtKeyUsageMicrosoftKernelCodeSigning, oidExtKeyUsageMicrosoftKernelCodeSigning},
+	{ExtKeyUsageCertificateTransparency, oidExtKeyUsageCertificateTransparency},
+}
+
+func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) {
+	for _, pair := range extKeyUsageOIDs {
+		if oid.Equal(pair.oid) {
+			return pair.extKeyUsage, true
+		}
+	}
+	return
+}
+
+func oidFromExtKeyUsage(eku ExtKeyUsage) (oid asn1.ObjectIdentifier, ok bool) {
+	for _, pair := range extKeyUsageOIDs {
+		if eku == pair.extKeyUsage {
+			return pair.oid, true
+		}
+	}
+	return
+}
+
+// SerializedSCT represents a single TLS-encoded signed certificate timestamp, from RFC6962 s3.3.
+type SerializedSCT struct {
+	Val []byte `tls:"minlen:1,maxlen:65535"`
+}
+
+// SignedCertificateTimestampList is a list of signed certificate timestamps, from RFC6962 s3.3.
+type SignedCertificateTimestampList struct {
+	SCTList []SerializedSCT `tls:"minlen:1,maxlen:65535"`
+}
+
+// A Certificate represents an X.509 certificate.
+type Certificate struct {
+	Raw                     []byte // Complete ASN.1 DER content (certificate, signature algorithm and signature).
+	RawTBSCertificate       []byte // Certificate part of raw ASN.1 DER content.
+	RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo.
+	RawSubject              []byte // DER encoded Subject
+	RawIssuer               []byte // DER encoded Issuer
+
+	Signature          []byte
+	SignatureAlgorithm SignatureAlgorithm
+
+	PublicKeyAlgorithm PublicKeyAlgorithm
+	PublicKey          interface{}
+
+	Version             int
+	SerialNumber        *big.Int
+	Issuer              pkix.Name
+	Subject             pkix.Name
+	NotBefore, NotAfter time.Time // Validity bounds.
+	KeyUsage            KeyUsage
+
+	// Extensions contains raw X.509 extensions. When parsing certificates,
+	// this can be used to extract non-critical extensions that are not
+	// parsed by this package. When marshaling certificates, the Extensions
+	// field is ignored, see ExtraExtensions.
+	Extensions []pkix.Extension
+
+	// ExtraExtensions contains extensions to be copied, raw, into any
+	// marshaled certificates. Values override any extensions that would
+	// otherwise be produced based on the other fields. The ExtraExtensions
+	// field is not populated when parsing certificates, see Extensions.
+	ExtraExtensions []pkix.Extension
+
+	// UnhandledCriticalExtensions contains a list of extension IDs that
+	// were not (fully) processed when parsing. Verify will fail if this
+	// slice is non-empty, unless verification is delegated to an OS
+	// library which understands all the critical extensions.
+ // + // Users can access these extensions using Extensions and can remove + // elements from this slice if they believe that they have been + // handled. + UnhandledCriticalExtensions []asn1.ObjectIdentifier + + ExtKeyUsage []ExtKeyUsage // Sequence of extended key usages. + UnknownExtKeyUsage []asn1.ObjectIdentifier // Encountered extended key usages unknown to this package. + + // BasicConstraintsValid indicates whether IsCA, MaxPathLen, + // and MaxPathLenZero are valid. + BasicConstraintsValid bool + IsCA bool + + // MaxPathLen and MaxPathLenZero indicate the presence and + // value of the BasicConstraints' "pathLenConstraint". + // + // When parsing a certificate, a positive non-zero MaxPathLen + // means that the field was specified, -1 means it was unset, + // and MaxPathLenZero being true mean that the field was + // explicitly set to zero. The case of MaxPathLen==0 with MaxPathLenZero==false + // should be treated equivalent to -1 (unset). + // + // When generating a certificate, an unset pathLenConstraint + // can be requested with either MaxPathLen == -1 or using the + // zero value for both MaxPathLen and MaxPathLenZero. + MaxPathLen int + // MaxPathLenZero indicates that BasicConstraintsValid==true + // and MaxPathLen==0 should be interpreted as an actual + // maximum path length of zero. Otherwise, that combination is + // interpreted as MaxPathLen not being set. + MaxPathLenZero bool + + SubjectKeyId []byte + AuthorityKeyId []byte + + // RFC 5280, 4.2.2.1 (Authority Information Access) + OCSPServer []string + IssuingCertificateURL []string + + // Subject Information Access + SubjectTimestamps []string + SubjectCARepositories []string + + // Subject Alternate Name values. (Note that these values may not be valid + // if invalid values were contained within a parsed certificate. For + // example, an element of DNSNames may not be a valid DNS domain name.) + DNSNames []string + EmailAddresses []string + IPAddresses []net.IP + URIs []*url.URL + + // Name constraints + PermittedDNSDomainsCritical bool // if true then the name constraints are marked critical. + PermittedDNSDomains []string + ExcludedDNSDomains []string + PermittedIPRanges []*net.IPNet + ExcludedIPRanges []*net.IPNet + PermittedEmailAddresses []string + ExcludedEmailAddresses []string + PermittedURIDomains []string + ExcludedURIDomains []string + + // CRL Distribution Points + CRLDistributionPoints []string + + PolicyIdentifiers []asn1.ObjectIdentifier + + RPKIAddressRanges []*IPAddressFamilyBlocks + RPKIASNumbers, RPKIRoutingDomainIDs *ASIdentifiers + + // Certificate Transparency SCT extension contents; this is a TLS-encoded + // SignedCertificateTimestampList (RFC 6962 s3.3). + RawSCT []byte + SCTList SignedCertificateTimestampList +} + +// ErrUnsupportedAlgorithm results from attempting to perform an operation that +// involves algorithms that are not currently implemented. +var ErrUnsupportedAlgorithm = errors.New("x509: cannot verify signature: algorithm unimplemented") + +// InsecureAlgorithmError results when the signature algorithm for a certificate +// is known to be insecure. +type InsecureAlgorithmError SignatureAlgorithm + +func (e InsecureAlgorithmError) Error() string { + return fmt.Sprintf("x509: cannot verify signature: insecure algorithm %v", SignatureAlgorithm(e)) +} + +// ConstraintViolationError results when a requested usage is not permitted by +// a certificate. For example: checking a signature when the public key isn't a +// certificate signing key. 
+type ConstraintViolationError struct{} + +func (ConstraintViolationError) Error() string { + return "x509: invalid signature: parent certificate cannot sign this kind of certificate" +} + +// Equal indicates whether two Certificate objects are equal (by comparing their +// DER-encoded values). +func (c *Certificate) Equal(other *Certificate) bool { + if c == nil || other == nil { + return c == other + } + return bytes.Equal(c.Raw, other.Raw) +} + +// IsPrecertificate checks whether the certificate is a precertificate, by +// checking for the presence of the CT Poison extension. +func (c *Certificate) IsPrecertificate() bool { + if c == nil { + return false + } + for _, ext := range c.Extensions { + if ext.Id.Equal(OIDExtensionCTPoison) { + return true + } + } + return false +} + +func (c *Certificate) hasSANExtension() bool { + return oidInExtensions(OIDExtensionSubjectAltName, c.Extensions) +} + +// Entrust have a broken root certificate (CN=Entrust.net Certification +// Authority (2048)) which isn't marked as a CA certificate and is thus invalid +// according to PKIX. +// We recognise this certificate by its SubjectPublicKeyInfo and exempt it +// from the Basic Constraints requirement. +// See http://www.entrust.net/knowledge-base/technote.cfm?tn=7869 +// +// TODO(agl): remove this hack once their reissued root is sufficiently +// widespread. +var entrustBrokenSPKI = []byte{ + 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, + 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, + 0x00, 0x97, 0xa3, 0x2d, 0x3c, 0x9e, 0xde, 0x05, + 0xda, 0x13, 0xc2, 0x11, 0x8d, 0x9d, 0x8e, 0xe3, + 0x7f, 0xc7, 0x4b, 0x7e, 0x5a, 0x9f, 0xb3, 0xff, + 0x62, 0xab, 0x73, 0xc8, 0x28, 0x6b, 0xba, 0x10, + 0x64, 0x82, 0x87, 0x13, 0xcd, 0x57, 0x18, 0xff, + 0x28, 0xce, 0xc0, 0xe6, 0x0e, 0x06, 0x91, 0x50, + 0x29, 0x83, 0xd1, 0xf2, 0xc3, 0x2a, 0xdb, 0xd8, + 0xdb, 0x4e, 0x04, 0xcc, 0x00, 0xeb, 0x8b, 0xb6, + 0x96, 0xdc, 0xbc, 0xaa, 0xfa, 0x52, 0x77, 0x04, + 0xc1, 0xdb, 0x19, 0xe4, 0xae, 0x9c, 0xfd, 0x3c, + 0x8b, 0x03, 0xef, 0x4d, 0xbc, 0x1a, 0x03, 0x65, + 0xf9, 0xc1, 0xb1, 0x3f, 0x72, 0x86, 0xf2, 0x38, + 0xaa, 0x19, 0xae, 0x10, 0x88, 0x78, 0x28, 0xda, + 0x75, 0xc3, 0x3d, 0x02, 0x82, 0x02, 0x9c, 0xb9, + 0xc1, 0x65, 0x77, 0x76, 0x24, 0x4c, 0x98, 0xf7, + 0x6d, 0x31, 0x38, 0xfb, 0xdb, 0xfe, 0xdb, 0x37, + 0x02, 0x76, 0xa1, 0x18, 0x97, 0xa6, 0xcc, 0xde, + 0x20, 0x09, 0x49, 0x36, 0x24, 0x69, 0x42, 0xf6, + 0xe4, 0x37, 0x62, 0xf1, 0x59, 0x6d, 0xa9, 0x3c, + 0xed, 0x34, 0x9c, 0xa3, 0x8e, 0xdb, 0xdc, 0x3a, + 0xd7, 0xf7, 0x0a, 0x6f, 0xef, 0x2e, 0xd8, 0xd5, + 0x93, 0x5a, 0x7a, 0xed, 0x08, 0x49, 0x68, 0xe2, + 0x41, 0xe3, 0x5a, 0x90, 0xc1, 0x86, 0x55, 0xfc, + 0x51, 0x43, 0x9d, 0xe0, 0xb2, 0xc4, 0x67, 0xb4, + 0xcb, 0x32, 0x31, 0x25, 0xf0, 0x54, 0x9f, 0x4b, + 0xd1, 0x6f, 0xdb, 0xd4, 0xdd, 0xfc, 0xaf, 0x5e, + 0x6c, 0x78, 0x90, 0x95, 0xde, 0xca, 0x3a, 0x48, + 0xb9, 0x79, 0x3c, 0x9b, 0x19, 0xd6, 0x75, 0x05, + 0xa0, 0xf9, 0x88, 0xd7, 0xc1, 0xe8, 0xa5, 0x09, + 0xe4, 0x1a, 0x15, 0xdc, 0x87, 0x23, 0xaa, 0xb2, + 0x75, 0x8c, 0x63, 0x25, 0x87, 0xd8, 0xf8, 0x3d, + 0xa6, 0xc2, 0xcc, 0x66, 0xff, 0xa5, 0x66, 0x68, + 0x55, 0x02, 0x03, 0x01, 0x00, 0x01, +} + +// CheckSignatureFrom verifies that the signature on c is a valid signature +// from parent. 
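+//
+// A minimal usage sketch (editorial; child and parent are assumed to be
+// parsed *Certificate values):
+//
+//	if err := child.CheckSignatureFrom(parent); err != nil {
+//		// parent's key did not produce child's signature, or parent
+//		// is not allowed to sign certificates.
+//	}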
+func (c *Certificate) CheckSignatureFrom(parent *Certificate) error { + // RFC 5280, 4.2.1.9: + // "If the basic constraints extension is not present in a version 3 + // certificate, or the extension is present but the cA boolean is not + // asserted, then the certified public key MUST NOT be used to verify + // certificate signatures." + // (except for Entrust, see comment above entrustBrokenSPKI) + if (parent.Version == 3 && !parent.BasicConstraintsValid || + parent.BasicConstraintsValid && !parent.IsCA) && + !bytes.Equal(c.RawSubjectPublicKeyInfo, entrustBrokenSPKI) { + return ConstraintViolationError{} + } + + if parent.KeyUsage != 0 && parent.KeyUsage&KeyUsageCertSign == 0 { + return ConstraintViolationError{} + } + + if parent.PublicKeyAlgorithm == UnknownPublicKeyAlgorithm { + return ErrUnsupportedAlgorithm + } + + // TODO(agl): don't ignore the path length constraint. + + return parent.CheckSignature(c.SignatureAlgorithm, c.RawTBSCertificate, c.Signature) +} + +// CheckSignature verifies that signature is a valid signature over signed from +// c's public key. +func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) error { + return checkSignature(algo, signed, signature, c.PublicKey) +} + +func (c *Certificate) hasNameConstraints() bool { + return oidInExtensions(OIDExtensionNameConstraints, c.Extensions) +} + +func (c *Certificate) getSANExtension() []byte { + for _, e := range c.Extensions { + if e.Id.Equal(OIDExtensionSubjectAltName) { + return e.Value + } + } + + return nil +} + +func signaturePublicKeyAlgoMismatchError(expectedPubKeyAlgo PublicKeyAlgorithm, pubKey interface{}) error { + return fmt.Errorf("x509: signature algorithm specifies an %s public key, but have public key of type %T", expectedPubKeyAlgo.String(), pubKey) +} + +// CheckSignature verifies that signature is a valid signature over signed from +// a crypto.PublicKey. +func checkSignature(algo SignatureAlgorithm, signed, signature []byte, publicKey crypto.PublicKey) (err error) { + var hashType crypto.Hash + var pubKeyAlgo PublicKeyAlgorithm + + for _, details := range signatureAlgorithmDetails { + if details.algo == algo { + hashType = details.hash + pubKeyAlgo = details.pubKeyAlgo + } + } + + switch hashType { + case crypto.Hash(0): + if pubKeyAlgo != Ed25519 { + return ErrUnsupportedAlgorithm + } + case crypto.MD5: + return InsecureAlgorithmError(algo) + default: + if !hashType.Available() { + return ErrUnsupportedAlgorithm + } + h := hashType.New() + h.Write(signed) + signed = h.Sum(nil) + } + + switch pub := publicKey.(type) { + case *rsa.PublicKey: + if pubKeyAlgo != RSA { + return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) + } + if algo.isRSAPSS() { + return rsa.VerifyPSS(pub, hashType, signed, signature, &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash}) + } else { + return rsa.VerifyPKCS1v15(pub, hashType, signed, signature) + } + case *dsa.PublicKey: + if pubKeyAlgo != DSA { + return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) + } + dsaSig := new(dsaSignature) + if rest, err := asn1.Unmarshal(signature, dsaSig); err != nil { + return err + } else if len(rest) != 0 { + return errors.New("x509: trailing data after DSA signature") + } + if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 { + return errors.New("x509: DSA signature contained zero or negative values") + } + // According to FIPS 186-3, section 4.6, the hash must be truncated if it is longer + // than the key length, but crypto/dsa doesn't do it automatically. 
+ if maxHashLen := pub.Q.BitLen() / 8; maxHashLen < len(signed) { + signed = signed[:maxHashLen] + } + if !dsa.Verify(pub, signed, dsaSig.R, dsaSig.S) { + return errors.New("x509: DSA verification failure") + } + return + case *ecdsa.PublicKey: + if pubKeyAlgo != ECDSA { + return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) + } + ecdsaSig := new(ecdsaSignature) + if rest, err := asn1.Unmarshal(signature, ecdsaSig); err != nil { + return err + } else if len(rest) != 0 { + return errors.New("x509: trailing data after ECDSA signature") + } + if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { + return errors.New("x509: ECDSA signature contained zero or negative values") + } + if !ecdsa.Verify(pub, signed, ecdsaSig.R, ecdsaSig.S) { + return errors.New("x509: ECDSA verification failure") + } + return + case ed25519.PublicKey: + if pubKeyAlgo != Ed25519 { + return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) + } + if !ed25519.Verify(pub, signed, signature) { + return errors.New("x509: Ed25519 verification failure") + } + return + } + return ErrUnsupportedAlgorithm +} + +// CheckCRLSignature checks that the signature in crl is from c. +func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) error { + algo := SignatureAlgorithmFromAI(crl.SignatureAlgorithm) + return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign()) +} + +// UnhandledCriticalExtension results when the certificate contains an extension +// that is marked as critical but which is not handled by this library. +type UnhandledCriticalExtension struct { + ID asn1.ObjectIdentifier +} + +func (h UnhandledCriticalExtension) Error() string { + return fmt.Sprintf("x509: unhandled critical extension (%v)", h.ID) +} + +// removeExtension takes a DER-encoded TBSCertificate, removes the extension +// specified by oid (preserving the order of other extensions), and returns the +// result still as a DER-encoded TBSCertificate. This function will fail if +// there is not exactly 1 extension of the type specified by the oid present. +func removeExtension(tbsData []byte, oid asn1.ObjectIdentifier) ([]byte, error) { + var tbs tbsCertificate + rest, err := asn1.Unmarshal(tbsData, &tbs) + if err != nil { + return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err) + } else if rLen := len(rest); rLen > 0 { + return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen) + } + extAt := -1 + for i, ext := range tbs.Extensions { + if ext.Id.Equal(oid) { + if extAt != -1 { + return nil, errors.New("multiple extensions of specified type present") + } + extAt = i + } + } + if extAt == -1 { + return nil, errors.New("no extension of specified type present") + } + tbs.Extensions = append(tbs.Extensions[:extAt], tbs.Extensions[extAt+1:]...) + // Clear out the asn1.RawContent so the re-marshal operation sees the + // updated structure (rather than just copying the out-of-date DER data). + tbs.Raw = nil + + data, err := asn1.Marshal(tbs) + if err != nil { + return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err) + } + return data, nil +} + +// RemoveSCTList takes a DER-encoded TBSCertificate and removes the CT SCT +// extension that contains the SCT list (preserving the order of other +// extensions), and returns the result still as a DER-encoded TBSCertificate. +// This function will fail if there is not exactly 1 CT SCT extension present. 
+func RemoveSCTList(tbsData []byte) ([]byte, error) { + return removeExtension(tbsData, OIDExtensionCTSCT) +} + +// RemoveCTPoison takes a DER-encoded TBSCertificate and removes the CT poison +// extension (preserving the order of other extensions), and returns the result +// still as a DER-encoded TBSCertificate. This function will fail if there is +// not exactly 1 CT poison extension present. +func RemoveCTPoison(tbsData []byte) ([]byte, error) { + return BuildPrecertTBS(tbsData, nil) +} + +// BuildPrecertTBS builds a Certificate Transparency pre-certificate (RFC 6962 +// s3.1) from the given DER-encoded TBSCertificate, returning a DER-encoded +// TBSCertificate. +// +// This function removes the CT poison extension (there must be exactly 1 of +// these), preserving the order of other extensions. +// +// If preIssuer is provided, this should be a special intermediate certificate +// that was used to sign the precert (indicated by having the special +// CertificateTransparency extended key usage). In this case, the issuance +// information of the pre-cert is updated to reflect the next issuer in the +// chain, i.e. the issuer of this special intermediate: +// - The precert's Issuer is changed to the Issuer of the intermediate +// - The precert's AuthorityKeyId is changed to the AuthorityKeyId of the +// intermediate. +func BuildPrecertTBS(tbsData []byte, preIssuer *Certificate) ([]byte, error) { + data, err := removeExtension(tbsData, OIDExtensionCTPoison) + if err != nil { + return nil, err + } + + var tbs tbsCertificate + rest, err := asn1.Unmarshal(data, &tbs) + if err != nil { + return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err) + } else if rLen := len(rest); rLen > 0 { + return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen) + } + + if preIssuer != nil { + // Update the precert's Issuer field. Use the RawIssuer rather than the + // parsed Issuer to avoid any chance of ASN.1 differences (e.g. switching + // from UTF8String to PrintableString). + tbs.Issuer.FullBytes = preIssuer.RawIssuer + + // Also need to update the cert's AuthorityKeyID extension + // to that of the preIssuer. + var issuerKeyID []byte + for _, ext := range preIssuer.Extensions { + if ext.Id.Equal(OIDExtensionAuthorityKeyId) { + issuerKeyID = ext.Value + break + } + } + + // Check the preIssuer has the CT EKU. + seenCTEKU := false + for _, eku := range preIssuer.ExtKeyUsage { + if eku == ExtKeyUsageCertificateTransparency { + seenCTEKU = true + break + } + } + if !seenCTEKU { + return nil, fmt.Errorf("issuer does not have CertificateTransparency extended key usage") + } + + keyAt := -1 + for i, ext := range tbs.Extensions { + if ext.Id.Equal(OIDExtensionAuthorityKeyId) { + keyAt = i + break + } + } + if keyAt >= 0 { + // PreCert has an auth-key-id; replace it with the value from the preIssuer + if issuerKeyID != nil { + tbs.Extensions[keyAt].Value = issuerKeyID + } else { + tbs.Extensions = append(tbs.Extensions[:keyAt], tbs.Extensions[keyAt+1:]...) + } + } else if issuerKeyID != nil { + // PreCert did not have an auth-key-id, but the preIssuer does, so add it at the end. + authKeyIDExt := pkix.Extension{ + Id: OIDExtensionAuthorityKeyId, + Critical: false, + Value: issuerKeyID, + } + tbs.Extensions = append(tbs.Extensions, authKeyIDExt) + } + + // Clear out the asn1.RawContent so the re-marshal operation sees the + // updated structure (rather than just copying the out-of-date DER data). 
+ tbs.Raw = nil + } + + data, err = asn1.Marshal(tbs) + if err != nil { + return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err) + } + return data, nil +} + +type basicConstraints struct { + IsCA bool `asn1:"optional"` + MaxPathLen int `asn1:"optional,default:-1"` +} + +// RFC 5280, 4.2.1.4 +type policyInformation struct { + Policy asn1.ObjectIdentifier + // policyQualifiers omitted +} + +const ( + nameTypeEmail = 1 + nameTypeDNS = 2 + nameTypeURI = 6 + nameTypeIP = 7 +) + +// RFC 5280, 4.2.2.1 +type accessDescription struct { + Method asn1.ObjectIdentifier + Location asn1.RawValue +} + +// RFC 5280, 4.2.1.14 +type distributionPoint struct { + DistributionPoint distributionPointName `asn1:"optional,tag:0"` + Reason asn1.BitString `asn1:"optional,tag:1"` + CRLIssuer asn1.RawValue `asn1:"optional,tag:2"` +} + +type distributionPointName struct { + FullName []asn1.RawValue `asn1:"optional,tag:0"` + RelativeName pkix.RDNSequence `asn1:"optional,tag:1"` +} + +func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo, nfe *NonFatalErrors) (interface{}, error) { + asn1Data := keyData.PublicKey.RightAlign() + switch algo { + case RSA, RSAESOAEP: + // RSA public keys must have a NULL in the parameters. + // See RFC 3279, Section 2.3.1. + if algo == RSA && !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) { + nfe.AddError(errors.New("x509: RSA key missing NULL parameters")) + } + if algo == RSAESOAEP { + // We only parse the parameters to ensure it is a valid encoding, we throw out the actual values + paramsData := keyData.Algorithm.Parameters.FullBytes + params := new(rsaesoaepAlgorithmParameters) + params.HashFunc = sha1Identifier + params.MaskgenFunc = mgf1SHA1Identifier + params.PSourceFunc = pSpecifiedEmptyIdentifier + rest, err := asn1.Unmarshal(paramsData, params) + if err != nil { + return nil, err + } + if len(rest) != 0 { + return nil, errors.New("x509: trailing data after RSAES-OAEP parameters") + } + } + + p := new(pkcs1PublicKey) + rest, err := asn1.Unmarshal(asn1Data, p) + if err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(asn1Data, p, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } + if len(rest) != 0 { + return nil, errors.New("x509: trailing data after RSA public key") + } + + if p.N.Sign() <= 0 { + nfe.AddError(errors.New("x509: RSA modulus is not a positive number")) + } + if p.E <= 0 { + return nil, errors.New("x509: RSA public exponent is not a positive number") + } + + // TODO(dkarch): Update to return the parameters once crypto/x509 has come up with permanent solution (https://github.com/golang/go/issues/30416) + pub := &rsa.PublicKey{ + E: p.E, + N: p.N, + } + return pub, nil + case DSA: + var p *big.Int + rest, err := asn1.Unmarshal(asn1Data, &p) + if err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &p, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } + if len(rest) != 0 { + return nil, errors.New("x509: trailing data after DSA public key") + } + paramsData := keyData.Algorithm.Parameters.FullBytes + params := new(dsaAlgorithmParameters) + rest, err = asn1.Unmarshal(paramsData, params) + if err != nil { + return nil, err + } + if len(rest) != 0 { + return nil, errors.New("x509: trailing data after DSA parameters") + } + if p.Sign() <= 0 || params.P.Sign() <= 0 || params.Q.Sign() <= 0 || params.G.Sign() <= 0 { + return nil, errors.New("x509: zero or negative DSA parameter") + } + pub := &dsa.PublicKey{ + Parameters: 
dsa.Parameters{ + P: params.P, + Q: params.Q, + G: params.G, + }, + Y: p, + } + return pub, nil + case ECDSA: + paramsData := keyData.Algorithm.Parameters.FullBytes + namedCurveOID := new(asn1.ObjectIdentifier) + rest, err := asn1.Unmarshal(paramsData, namedCurveOID) + if err != nil { + return nil, errors.New("x509: failed to parse ECDSA parameters as named curve") + } + if len(rest) != 0 { + return nil, errors.New("x509: trailing data after ECDSA parameters") + } + namedCurve := namedCurveFromOID(*namedCurveOID, nfe) + if namedCurve == nil { + return nil, fmt.Errorf("x509: unsupported elliptic curve %v", namedCurveOID) + } + x, y := elliptic.Unmarshal(namedCurve, asn1Data) + if x == nil { + return nil, errors.New("x509: failed to unmarshal elliptic curve point") + } + pub := &ecdsa.PublicKey{ + Curve: namedCurve, + X: x, + Y: y, + } + return pub, nil + case Ed25519: + return ed25519.PublicKey(asn1Data), nil + default: + return nil, nil + } +} + +// NonFatalErrors is an error type which can hold a number of other errors. +// It's used to collect a range of non-fatal errors which occur while parsing +// a certificate, that way we can still match on certs which technically are +// invalid. +type NonFatalErrors struct { + Errors []error +} + +// AddError adds an error to the list of errors contained by NonFatalErrors. +func (e *NonFatalErrors) AddError(err error) { + e.Errors = append(e.Errors, err) +} + +// Returns a string consisting of the values of Error() from all of the errors +// contained in |e| +func (e NonFatalErrors) Error() string { + r := "NonFatalErrors: " + for _, err := range e.Errors { + r += err.Error() + "; " + } + return r +} + +// HasError returns true if |e| contains at least one error +func (e *NonFatalErrors) HasError() bool { + if e == nil { + return false + } + return len(e.Errors) > 0 +} + +// Append combines the contents of two NonFatalErrors instances. +func (e *NonFatalErrors) Append(more *NonFatalErrors) *NonFatalErrors { + if e == nil { + return more + } + if more == nil { + return e + } + combined := NonFatalErrors{Errors: make([]error, 0, len(e.Errors)+len(more.Errors))} + combined.Errors = append(combined.Errors, e.Errors...) + combined.Errors = append(combined.Errors, more.Errors...) + return &combined +} + +// IsFatal indicates whether an error is fatal. +func IsFatal(err error) bool { + if err == nil { + return false + } + if _, ok := err.(NonFatalErrors); ok { + return false + } + if errs, ok := err.(*Errors); ok { + return errs.Fatal() + } + return true +} + +func parseDistributionPoints(data []byte, crldp *[]string) error { + // CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint + // + // DistributionPoint ::= SEQUENCE { + // distributionPoint [0] DistributionPointName OPTIONAL, + // reasons [1] ReasonFlags OPTIONAL, + // cRLIssuer [2] GeneralNames OPTIONAL } + // + // DistributionPointName ::= CHOICE { + // fullName [0] GeneralNames, + // nameRelativeToCRLIssuer [1] RelativeDistinguishedName } + + var cdp []distributionPoint + if rest, err := asn1.Unmarshal(data, &cdp); err != nil { + return err + } else if len(rest) != 0 { + return errors.New("x509: trailing data after X.509 CRL distribution point") + } + + for _, dp := range cdp { + // Per RFC 5280, 4.2.1.13, one of distributionPoint or cRLIssuer may be empty. 
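+		// Only the uniformResourceIdentifier form (GeneralName tag 6) is
+		// collected below; e.g. a hypothetical fullName of
+		// "http://crl.example.com/ca.crl" is appended to crldp, while a
+		// directoryName entry would be skipped.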
+ if len(dp.DistributionPoint.FullName) == 0 { + continue + } + + for _, fullName := range dp.DistributionPoint.FullName { + if fullName.Tag == 6 { + *crldp = append(*crldp, string(fullName.Bytes)) + } + } + } + return nil +} + +func forEachSAN(extension []byte, callback func(tag int, data []byte) error) error { + // RFC 5280, 4.2.1.6 + + // SubjectAltName ::= GeneralNames + // + // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName + // + // GeneralName ::= CHOICE { + // otherName [0] OtherName, + // rfc822Name [1] IA5String, + // dNSName [2] IA5String, + // x400Address [3] ORAddress, + // directoryName [4] Name, + // ediPartyName [5] EDIPartyName, + // uniformResourceIdentifier [6] IA5String, + // iPAddress [7] OCTET STRING, + // registeredID [8] OBJECT IDENTIFIER } + var seq asn1.RawValue + rest, err := asn1.Unmarshal(extension, &seq) + if err != nil { + return err + } else if len(rest) != 0 { + return errors.New("x509: trailing data after X.509 extension") + } + if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal { + return asn1.StructuralError{Msg: "bad SAN sequence"} + } + + rest = seq.Bytes + for len(rest) > 0 { + var v asn1.RawValue + rest, err = asn1.Unmarshal(rest, &v) + if err != nil { + return err + } + + if err := callback(v.Tag, v.Bytes); err != nil { + return err + } + } + + return nil +} + +func parseSANExtension(value []byte, nfe *NonFatalErrors) (dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL, err error) { + err = forEachSAN(value, func(tag int, data []byte) error { + switch tag { + case nameTypeEmail: + emailAddresses = append(emailAddresses, string(data)) + case nameTypeDNS: + dnsNames = append(dnsNames, string(data)) + case nameTypeURI: + uri, err := url.Parse(string(data)) + if err != nil { + return fmt.Errorf("x509: cannot parse URI %q: %s", string(data), err) + } + if len(uri.Host) > 0 { + if _, ok := domainToReverseLabels(uri.Host); !ok { + return fmt.Errorf("x509: cannot parse URI %q: invalid domain", string(data)) + } + } + uris = append(uris, uri) + case nameTypeIP: + switch len(data) { + case net.IPv4len, net.IPv6len: + ipAddresses = append(ipAddresses, data) + default: + nfe.AddError(errors.New("x509: cannot parse IP address of length " + strconv.Itoa(len(data)))) + } + } + + return nil + }) + + return +} + +// isValidIPMask reports whether mask consists of zero or more 1 bits, followed by zero bits. 
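+// For example, {0xff, 0xff, 0xff, 0x00} (a /24 mask) is valid, while
+// {0xff, 0x01, 0x00, 0x00} is not, because a 1 bit follows a 0 bit.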
+func isValidIPMask(mask []byte) bool { + seenZero := false + + for _, b := range mask { + if seenZero { + if b != 0 { + return false + } + + continue + } + + switch b { + case 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe: + seenZero = true + case 0xff: + default: + return false + } + } + + return true +} + +func parseNameConstraintsExtension(out *Certificate, e pkix.Extension, nfe *NonFatalErrors) (unhandled bool, err error) { + // RFC 5280, 4.2.1.10 + + // NameConstraints ::= SEQUENCE { + // permittedSubtrees [0] GeneralSubtrees OPTIONAL, + // excludedSubtrees [1] GeneralSubtrees OPTIONAL } + // + // GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree + // + // GeneralSubtree ::= SEQUENCE { + // base GeneralName, + // minimum [0] BaseDistance DEFAULT 0, + // maximum [1] BaseDistance OPTIONAL } + // + // BaseDistance ::= INTEGER (0..MAX) + + outer := cryptobyte.String(e.Value) + var toplevel, permitted, excluded cryptobyte.String + var havePermitted, haveExcluded bool + if !outer.ReadASN1(&toplevel, cryptobyte_asn1.SEQUENCE) || + !outer.Empty() || + !toplevel.ReadOptionalASN1(&permitted, &havePermitted, cryptobyte_asn1.Tag(0).ContextSpecific().Constructed()) || + !toplevel.ReadOptionalASN1(&excluded, &haveExcluded, cryptobyte_asn1.Tag(1).ContextSpecific().Constructed()) || + !toplevel.Empty() { + return false, errors.New("x509: invalid NameConstraints extension") + } + + if !havePermitted && !haveExcluded || len(permitted) == 0 && len(excluded) == 0 { + // From RFC 5280, Section 4.2.1.10: + // “either the permittedSubtrees field + // or the excludedSubtrees MUST be + // present” + return false, errors.New("x509: empty name constraints extension") + } + + getValues := func(subtrees cryptobyte.String) (dnsNames []string, ips []*net.IPNet, emails, uriDomains []string, err error) { + for !subtrees.Empty() { + var seq, value cryptobyte.String + var tag cryptobyte_asn1.Tag + if !subtrees.ReadASN1(&seq, cryptobyte_asn1.SEQUENCE) || + !seq.ReadAnyASN1(&value, &tag) { + return nil, nil, nil, nil, fmt.Errorf("x509: invalid NameConstraints extension") + } + + var ( + dnsTag = cryptobyte_asn1.Tag(2).ContextSpecific() + emailTag = cryptobyte_asn1.Tag(1).ContextSpecific() + ipTag = cryptobyte_asn1.Tag(7).ContextSpecific() + uriTag = cryptobyte_asn1.Tag(6).ContextSpecific() + ) + + switch tag { + case dnsTag: + domain := string(value) + if err := isIA5String(domain); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + trimmedDomain := domain + if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { + // constraints can have a leading + // period to exclude the domain + // itself, but that's not valid in a + // normal domain name. 
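+					// For example, a dNSName constraint of ".example.com"
+					// is validated here as "example.com"; the stored
+					// constraint below keeps the leading period.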
+ trimmedDomain = trimmedDomain[1:] + } + if _, ok := domainToReverseLabels(trimmedDomain); !ok { + nfe.AddError(fmt.Errorf("x509: failed to parse dnsName constraint %q", domain)) + } + dnsNames = append(dnsNames, domain) + + case ipTag: + l := len(value) + var ip, mask []byte + + switch l { + case 8: + ip = value[:4] + mask = value[4:] + + case 32: + ip = value[:16] + mask = value[16:] + + default: + return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained value of length %d", l) + } + + if !isValidIPMask(mask) { + return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained invalid mask %x", mask) + } + + ips = append(ips, &net.IPNet{IP: net.IP(ip), Mask: net.IPMask(mask)}) + + case emailTag: + constraint := string(value) + if err := isIA5String(constraint); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + // If the constraint contains an @ then + // it specifies an exact mailbox name. + if strings.Contains(constraint, "@") { + if _, ok := parseRFC2821Mailbox(constraint); !ok { + nfe.AddError(fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)) + } + } else { + // Otherwise it's a domain name. + domain := constraint + if len(domain) > 0 && domain[0] == '.' { + domain = domain[1:] + } + if _, ok := domainToReverseLabels(domain); !ok { + nfe.AddError(fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)) + } + } + emails = append(emails, constraint) + + case uriTag: + domain := string(value) + if err := isIA5String(domain); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + if net.ParseIP(domain) != nil { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain) + } + + trimmedDomain := domain + if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { + // constraints can have a leading + // period to exclude the domain itself, + // but that's not valid in a normal + // domain name. + trimmedDomain = trimmedDomain[1:] + } + if _, ok := domainToReverseLabels(trimmedDomain); !ok { + nfe.AddError(fmt.Errorf("x509: failed to parse URI constraint %q", domain)) + } + uriDomains = append(uriDomains, domain) + + default: + unhandled = true + } + } + + return dnsNames, ips, emails, uriDomains, nil + } + + if out.PermittedDNSDomains, out.PermittedIPRanges, out.PermittedEmailAddresses, out.PermittedURIDomains, err = getValues(permitted); err != nil { + return false, err + } + if out.ExcludedDNSDomains, out.ExcludedIPRanges, out.ExcludedEmailAddresses, out.ExcludedURIDomains, err = getValues(excluded); err != nil { + return false, err + } + out.PermittedDNSDomainsCritical = e.Critical + + return unhandled, nil +} + +func parseCertificate(in *certificate, tbsOnly bool) (*Certificate, error) { + var nfe NonFatalErrors + + // Certificates contain two signature algorithm identifier fields, + // one in the inner signed tbsCertificate structure and one in the + // outer unsigned certificate structure. RFC 5280 requires these + // fields match, but golang doesn't impose this restriction. Because + // the outer structure is not covered by the signature the algorithm + // field is entirely malleable. 
This allows a user to bypass the + // leaf data uniqueness check that happens in trillian by altering + // the unbounded OID or parameter fields of the algorithmIdentifier + // structure and submit an infinite number of duplicate but slightly + // different looking certificates to a log. To avoid this directly + // compare the bytes of the two algorithmIdentifier structures + // and reject the certificate if they do not match. + if !tbsOnly && !bytes.Equal(in.SignatureAlgorithm.Raw, in.TBSCertificate.SignatureAlgorithm.Raw) { + return nil, errors.New("x509: mismatching signature algorithm identifiers") + } + + out := new(Certificate) + out.Raw = in.Raw + out.RawTBSCertificate = in.TBSCertificate.Raw + out.RawSubjectPublicKeyInfo = in.TBSCertificate.PublicKey.Raw + out.RawSubject = in.TBSCertificate.Subject.FullBytes + out.RawIssuer = in.TBSCertificate.Issuer.FullBytes + + out.Signature = in.SignatureValue.RightAlign() + out.SignatureAlgorithm = SignatureAlgorithmFromAI(in.TBSCertificate.SignatureAlgorithm) + + out.PublicKeyAlgorithm = + getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm) + var err error + out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey, &nfe) + if err != nil { + return nil, err + } + + out.Version = in.TBSCertificate.Version + 1 + out.SerialNumber = in.TBSCertificate.SerialNumber + + var issuer, subject pkix.RDNSequence + if rest, err := asn1.Unmarshal(in.TBSCertificate.Subject.FullBytes, &subject); err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(in.TBSCertificate.Subject.FullBytes, &subject, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 subject") + } + if rest, err := asn1.Unmarshal(in.TBSCertificate.Issuer.FullBytes, &issuer); err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(in.TBSCertificate.Issuer.FullBytes, &issuer, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 subject") + } + + out.Issuer.FillFromRDNSequence(&issuer) + out.Subject.FillFromRDNSequence(&subject) + + out.NotBefore = in.TBSCertificate.Validity.NotBefore + out.NotAfter = in.TBSCertificate.Validity.NotAfter + + for _, e := range in.TBSCertificate.Extensions { + out.Extensions = append(out.Extensions, e) + unhandled := false + + if len(e.Id) == 4 && e.Id[0] == OIDExtensionArc[0] && e.Id[1] == OIDExtensionArc[1] && e.Id[2] == OIDExtensionArc[2] { + switch e.Id[3] { + case OIDExtensionKeyUsage[3]: + // RFC 5280, 4.2.1.3 + var usageBits asn1.BitString + if rest, err := asn1.Unmarshal(e.Value, &usageBits); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 KeyUsage") + } + + var usage int + for i := 0; i < 9; i++ { + if usageBits.At(i) != 0 { + usage |= 1 << uint(i) + } + } + out.KeyUsage = KeyUsage(usage) + + case OIDExtensionBasicConstraints[3]: + // RFC 5280, 4.2.1.9 + var constraints basicConstraints + if rest, err := asn1.Unmarshal(e.Value, &constraints); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 BasicConstraints") + } + + out.BasicConstraintsValid = true + out.IsCA = constraints.IsCA + out.MaxPathLen = constraints.MaxPathLen + out.MaxPathLenZero = out.MaxPathLen == 0 + // TODO: map out.MaxPathLen to 0 if it has the -1 default 
value? (Issue 19285) + + case OIDExtensionSubjectAltName[3]: + out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(e.Value, &nfe) + if err != nil { + return nil, err + } + + if len(out.DNSNames) == 0 && len(out.EmailAddresses) == 0 && len(out.IPAddresses) == 0 && len(out.URIs) == 0 { + // If we didn't parse anything then we do the critical check, below. + unhandled = true + } + + case OIDExtensionNameConstraints[3]: + unhandled, err = parseNameConstraintsExtension(out, e, &nfe) + if err != nil { + return nil, err + } + + case OIDExtensionCRLDistributionPoints[3]: + // RFC 5280, 4.2.1.13 + if err := parseDistributionPoints(e.Value, &out.CRLDistributionPoints); err != nil { + return nil, err + } + + case OIDExtensionAuthorityKeyId[3]: + // RFC 5280, 4.2.1.1 + var a authKeyId + if rest, err := asn1.Unmarshal(e.Value, &a); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 authority key-id") + } + out.AuthorityKeyId = a.Id + + case OIDExtensionExtendedKeyUsage[3]: + // RFC 5280, 4.2.1.12. Extended Key Usage + + // id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 } + // + // ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId + // + // KeyPurposeId ::= OBJECT IDENTIFIER + + var keyUsage []asn1.ObjectIdentifier + if len(e.Value) == 0 { + nfe.AddError(errors.New("x509: empty ExtendedKeyUsage")) + } else { + rest, err := asn1.Unmarshal(e.Value, &keyUsage) + if err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(e.Value, &keyUsage, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } + if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 ExtendedKeyUsage") + } + } + + for _, u := range keyUsage { + if extKeyUsage, ok := extKeyUsageFromOID(u); ok { + out.ExtKeyUsage = append(out.ExtKeyUsage, extKeyUsage) + } else { + out.UnknownExtKeyUsage = append(out.UnknownExtKeyUsage, u) + } + } + + case OIDExtensionSubjectKeyId[3]: + // RFC 5280, 4.2.1.2 + var keyid []byte + if rest, err := asn1.Unmarshal(e.Value, &keyid); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 key-id") + } + out.SubjectKeyId = keyid + + case OIDExtensionCertificatePolicies[3]: + // RFC 5280 4.2.1.4: Certificate Policies + var policies []policyInformation + if rest, err := asn1.Unmarshal(e.Value, &policies); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 certificate policies") + } + out.PolicyIdentifiers = make([]asn1.ObjectIdentifier, len(policies)) + for i, policy := range policies { + out.PolicyIdentifiers[i] = policy.Policy + } + + default: + // Unknown extensions are recorded if critical. 
+ unhandled = true + } + } else if e.Id.Equal(OIDExtensionAuthorityInfoAccess) { + // RFC 5280 4.2.2.1: Authority Information Access + var aia []accessDescription + if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 authority information") + } + if len(aia) == 0 { + nfe.AddError(errors.New("x509: empty AuthorityInfoAccess extension")) + } + + for _, v := range aia { + // GeneralName: uniformResourceIdentifier [6] IA5String + if v.Location.Tag != 6 { + continue + } + if v.Method.Equal(OIDAuthorityInfoAccessOCSP) { + out.OCSPServer = append(out.OCSPServer, string(v.Location.Bytes)) + } else if v.Method.Equal(OIDAuthorityInfoAccessIssuers) { + out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(v.Location.Bytes)) + } + } + } else if e.Id.Equal(OIDExtensionSubjectInfoAccess) { + // RFC 5280 4.2.2.2: Subject Information Access + var sia []accessDescription + if rest, err := asn1.Unmarshal(e.Value, &sia); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 subject information") + } + if len(sia) == 0 { + nfe.AddError(errors.New("x509: empty SubjectInfoAccess extension")) + } + + for _, v := range sia { + // TODO(drysdale): cope with non-URI types of GeneralName + // GeneralName: uniformResourceIdentifier [6] IA5String + if v.Location.Tag != 6 { + continue + } + if v.Method.Equal(OIDSubjectInfoAccessTimestamp) { + out.SubjectTimestamps = append(out.SubjectTimestamps, string(v.Location.Bytes)) + } else if v.Method.Equal(OIDSubjectInfoAccessCARepo) { + out.SubjectCARepositories = append(out.SubjectCARepositories, string(v.Location.Bytes)) + } + } + } else if e.Id.Equal(OIDExtensionIPPrefixList) { + out.RPKIAddressRanges = parseRPKIAddrBlocks(e.Value, &nfe) + } else if e.Id.Equal(OIDExtensionASList) { + out.RPKIASNumbers, out.RPKIRoutingDomainIDs = parseRPKIASIdentifiers(e.Value, &nfe) + } else if e.Id.Equal(OIDExtensionCTSCT) { + if rest, err := asn1.Unmarshal(e.Value, &out.RawSCT); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal SCT list extension: %v", err)) + } else if len(rest) != 0 { + nfe.AddError(errors.New("trailing data after ASN1-encoded SCT list")) + } else { + if rest, err := tls.Unmarshal(out.RawSCT, &out.SCTList); err != nil { + nfe.AddError(fmt.Errorf("failed to tls.Unmarshal SCT list: %v", err)) + } else if len(rest) != 0 { + nfe.AddError(errors.New("trailing data after TLS-encoded SCT list")) + } + } + } else { + // Unknown extensions are recorded if critical. + unhandled = true + } + + if e.Critical && unhandled { + out.UnhandledCriticalExtensions = append(out.UnhandledCriticalExtensions, e.Id) + } + } + if nfe.HasError() { + return out, nfe + } + return out, nil +} + +// ParseTBSCertificate parses a single TBSCertificate from the given ASN.1 DER data. +// The parsed data is returned in a Certificate struct for ease of access. 
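+//
+// Because the returned error may be non-fatal, callers can use IsFatal to
+// decide whether the returned Certificate is still usable, as in this sketch:
+//
+//	cert, err := ParseTBSCertificate(der)
+//	if IsFatal(err) {
+//		return err
+//	}
+//	// cert is populated; a non-nil err only records non-fatal issues.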
+func ParseTBSCertificate(asn1Data []byte) (*Certificate, error) {
+	var tbsCert tbsCertificate
+	var nfe NonFatalErrors
+	rest, err := asn1.Unmarshal(asn1Data, &tbsCert)
+	if err != nil {
+		var laxErr error
+		rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &tbsCert, "lax")
+		if laxErr != nil {
+			return nil, laxErr
+		}
+		nfe.AddError(err)
+	}
+	if len(rest) > 0 {
+		return nil, asn1.SyntaxError{Msg: "trailing data"}
+	}
+	ret, err := parseCertificate(&certificate{
+		Raw:            tbsCert.Raw,
+		TBSCertificate: tbsCert}, true)
+	if err != nil {
+		errs, ok := err.(NonFatalErrors)
+		if !ok {
+			return nil, err
+		}
+		nfe.Errors = append(nfe.Errors, errs.Errors...)
+	}
+	if nfe.HasError() {
+		return ret, nfe
+	}
+	return ret, nil
+}
+
+// ParseCertificate parses a single certificate from the given ASN.1 DER data.
+// This function can return both a Certificate and an error (in which case the
+// error will be of type NonFatalErrors).
+func ParseCertificate(asn1Data []byte) (*Certificate, error) {
+	var cert certificate
+	var nfe NonFatalErrors
+	rest, err := asn1.Unmarshal(asn1Data, &cert)
+	if err != nil {
+		var laxErr error
+		rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &cert, "lax")
+		if laxErr != nil {
+			return nil, laxErr
+		}
+		nfe.AddError(err)
+	}
+	if len(rest) > 0 {
+		return nil, asn1.SyntaxError{Msg: "trailing data"}
+	}
+	ret, err := parseCertificate(&cert, false)
+	if err != nil {
+		errs, ok := err.(NonFatalErrors)
+		if !ok {
+			return nil, err
+		}
+		nfe.Errors = append(nfe.Errors, errs.Errors...)
+	}
+	if nfe.HasError() {
+		return ret, nfe
+	}
+	return ret, nil
+}
+
+// ParseCertificates parses one or more certificates from the given ASN.1 DER
+// data. The certificates must be concatenated with no intermediate padding.
+// This function can return both a slice of Certificate and an error (in which
+// case the error will be of type NonFatalErrors).
+func ParseCertificates(asn1Data []byte) ([]*Certificate, error) {
+	var v []*certificate
+	var nfe NonFatalErrors
+
+	for len(asn1Data) > 0 {
+		cert := new(certificate)
+		var err error
+		asn1Data, err = asn1.Unmarshal(asn1Data, cert)
+		if err != nil {
+			var laxErr error
+			asn1Data, laxErr = asn1.UnmarshalWithParams(asn1Data, cert, "lax")
+			if laxErr != nil {
+				return nil, laxErr
+			}
+			nfe.AddError(err)
+		}
+		v = append(v, cert)
+	}
+
+	ret := make([]*Certificate, len(v))
+	for i, ci := range v {
+		cert, err := parseCertificate(ci, false)
+		if err != nil {
+			errs, ok := err.(NonFatalErrors)
+			if !ok {
+				return nil, err
+			}
+			nfe.Errors = append(nfe.Errors, errs.Errors...)
+		}
+		ret[i] = cert
+	}
+
+	if nfe.HasError() {
+		return ret, nfe
+	}
+	return ret, nil
+}
+
+func reverseBitsInAByte(in byte) byte {
+	b1 := in>>4 | in<<4
+	b2 := b1>>2&0x33 | b1<<2&0xcc
+	b3 := b2>>1&0x55 | b2<<1&0xaa
+	return b3
+}
+
+// asn1BitLength returns the bit-length of bitString by considering the
+// most-significant bit in a byte to be the "first" bit. This convention
+// matches ASN.1, but differs from almost everything else.
+func asn1BitLength(bitString []byte) int {
+	bitLen := len(bitString) * 8
+
+	for i := range bitString {
+		b := bitString[len(bitString)-i-1]
+
+		for bit := uint(0); bit < 8; bit++ {
+			if (b>>bit)&1 == 1 {
+				return bitLen
+			}
+			bitLen--
+		}
+	}
+
+	return 0
+}
+
+// OID values for standard extensions from RFC 5280.
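+// The id-ce arc is {joint-iso-itu-t(2) ds(5) certificateExtension(29)}, so,
+// for example, OIDExtensionBasicConstraints below is 2.5.29.19.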
+var (
+	OIDExtensionArc                        = asn1.ObjectIdentifier{2, 5, 29} // id-ce RFC5280 s4.2.1
+	OIDExtensionSubjectKeyId               = asn1.ObjectIdentifier{2, 5, 29, 14}
+	OIDExtensionKeyUsage                   = asn1.ObjectIdentifier{2, 5, 29, 15}
+	OIDExtensionExtendedKeyUsage           = asn1.ObjectIdentifier{2, 5, 29, 37}
+	OIDExtensionAuthorityKeyId             = asn1.ObjectIdentifier{2, 5, 29, 35}
+	OIDExtensionBasicConstraints           = asn1.ObjectIdentifier{2, 5, 29, 19}
+	OIDExtensionSubjectAltName             = asn1.ObjectIdentifier{2, 5, 29, 17}
+	OIDExtensionCertificatePolicies        = asn1.ObjectIdentifier{2, 5, 29, 32}
+	OIDExtensionNameConstraints            = asn1.ObjectIdentifier{2, 5, 29, 30}
+	OIDExtensionCRLDistributionPoints      = asn1.ObjectIdentifier{2, 5, 29, 31}
+	OIDExtensionIssuerAltName              = asn1.ObjectIdentifier{2, 5, 29, 18}
+	OIDExtensionSubjectDirectoryAttributes = asn1.ObjectIdentifier{2, 5, 29, 9}
+	OIDExtensionInhibitAnyPolicy           = asn1.ObjectIdentifier{2, 5, 29, 54}
+	OIDExtensionPolicyConstraints          = asn1.ObjectIdentifier{2, 5, 29, 36}
+	OIDExtensionPolicyMappings             = asn1.ObjectIdentifier{2, 5, 29, 33}
+	OIDExtensionFreshestCRL                = asn1.ObjectIdentifier{2, 5, 29, 46}
+
+	OIDExtensionAuthorityInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 1}
+	OIDExtensionSubjectInfoAccess   = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 11}
+
+	// OIDExtensionCTPoison is defined in RFC 6962 s3.1.
+	OIDExtensionCTPoison = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
+	// OIDExtensionCTSCT is defined in RFC 6962 s3.3.
+	OIDExtensionCTSCT = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
+	// OIDExtensionIPPrefixList is defined in RFC 3779 s2.
+	OIDExtensionIPPrefixList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 7}
+	// OIDExtensionASList is defined in RFC 3779 s3.
+	OIDExtensionASList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 8}
+)
+
+var (
+	OIDAuthorityInfoAccessOCSP    = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1}
+	OIDAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2}
+	OIDSubjectInfoAccessTimestamp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 3}
+	OIDSubjectInfoAccessCARepo    = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 5}
+	OIDAnyPolicy                  = asn1.ObjectIdentifier{2, 5, 29, 32, 0}
+)
+
+// oidInExtensions reports whether an extension with the given oid exists in
+// extensions.
+func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool {
+	for _, e := range extensions {
+		if e.Id.Equal(oid) {
+			return true
+		}
+	}
+	return false
+}
+
+// marshalSANs marshals a list of addresses into the contents of an X.509
+// SubjectAlternativeName extension.
+func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) (derBytes []byte, err error) {
+	var rawValues []asn1.RawValue
+	for _, name := range dnsNames {
+		rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: asn1.ClassContextSpecific, Bytes: []byte(name)})
+	}
+	for _, email := range emailAddresses {
+		rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: asn1.ClassContextSpecific, Bytes: []byte(email)})
+	}
+	for _, rawIP := range ipAddresses {
+		// If possible, we always want to encode IPv4 addresses in 4 bytes.
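+		// Note that net.ParseIP returns the 16-byte form even for IPv4
+		// addresses, and To4 yields nil only for genuine IPv6 addresses,
+		// so e.g. net.ParseIP("192.0.2.1") is encoded here as 4 bytes.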
+ ip := rawIP.To4() + if ip == nil { + ip = rawIP + } + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: asn1.ClassContextSpecific, Bytes: ip}) + } + for _, uri := range uris { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: asn1.ClassContextSpecific, Bytes: []byte(uri.String())}) + } + return asn1.Marshal(rawValues) +} + +func isIA5String(s string) error { + for _, r := range s { + if r >= utf8.RuneSelf { + return fmt.Errorf("x509: %q cannot be encoded as an IA5String", s) + } + } + + return nil +} + +func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId []byte) (ret []pkix.Extension, err error) { + ret = make([]pkix.Extension, 12 /* maximum number of elements. */) + n := 0 + + if template.KeyUsage != 0 && + !oidInExtensions(OIDExtensionKeyUsage, template.ExtraExtensions) { + ret[n].Id = OIDExtensionKeyUsage + ret[n].Critical = true + + var a [2]byte + a[0] = reverseBitsInAByte(byte(template.KeyUsage)) + a[1] = reverseBitsInAByte(byte(template.KeyUsage >> 8)) + + l := 1 + if a[1] != 0 { + l = 2 + } + + bitString := a[:l] + ret[n].Value, err = asn1.Marshal(asn1.BitString{Bytes: bitString, BitLength: asn1BitLength(bitString)}) + if err != nil { + return + } + n++ + } + + if (len(template.ExtKeyUsage) > 0 || len(template.UnknownExtKeyUsage) > 0) && + !oidInExtensions(OIDExtensionExtendedKeyUsage, template.ExtraExtensions) { + ret[n].Id = OIDExtensionExtendedKeyUsage + + var oids []asn1.ObjectIdentifier + for _, u := range template.ExtKeyUsage { + if oid, ok := oidFromExtKeyUsage(u); ok { + oids = append(oids, oid) + } else { + panic("internal error") + } + } + + oids = append(oids, template.UnknownExtKeyUsage...) + + ret[n].Value, err = asn1.Marshal(oids) + if err != nil { + return + } + n++ + } + + if template.BasicConstraintsValid && !oidInExtensions(OIDExtensionBasicConstraints, template.ExtraExtensions) { + // Leaving MaxPathLen as zero indicates that no maximum path + // length is desired, unless MaxPathLenZero is set. A value of + // -1 causes encoding/asn1 to omit the value as desired. 
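+		// Concretely: IsCA=true with MaxPathLen=0 and MaxPathLenZero=false
+		// omits pathLenConstraint entirely, while MaxPathLenZero=true
+		// encodes an explicit pathLenConstraint of 0.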
+		maxPathLen := template.MaxPathLen
+		if maxPathLen == 0 && !template.MaxPathLenZero {
+			maxPathLen = -1
+		}
+		ret[n].Id = OIDExtensionBasicConstraints
+		ret[n].Value, err = asn1.Marshal(basicConstraints{template.IsCA, maxPathLen})
+		ret[n].Critical = true
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	if len(template.SubjectKeyId) > 0 && !oidInExtensions(OIDExtensionSubjectKeyId, template.ExtraExtensions) {
+		ret[n].Id = OIDExtensionSubjectKeyId
+		ret[n].Value, err = asn1.Marshal(template.SubjectKeyId)
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	if len(authorityKeyId) > 0 && !oidInExtensions(OIDExtensionAuthorityKeyId, template.ExtraExtensions) {
+		ret[n].Id = OIDExtensionAuthorityKeyId
+		ret[n].Value, err = asn1.Marshal(authKeyId{authorityKeyId})
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	if (len(template.OCSPServer) > 0 || len(template.IssuingCertificateURL) > 0) &&
+		!oidInExtensions(OIDExtensionAuthorityInfoAccess, template.ExtraExtensions) {
+		ret[n].Id = OIDExtensionAuthorityInfoAccess
+		var aiaValues []accessDescription
+		for _, name := range template.OCSPServer {
+			aiaValues = append(aiaValues, accessDescription{
+				Method:   OIDAuthorityInfoAccessOCSP,
+				Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
+			})
+		}
+		for _, name := range template.IssuingCertificateURL {
+			aiaValues = append(aiaValues, accessDescription{
+				Method:   OIDAuthorityInfoAccessIssuers,
+				Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
+			})
+		}
+		ret[n].Value, err = asn1.Marshal(aiaValues)
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	if (len(template.SubjectTimestamps) > 0 || len(template.SubjectCARepositories) > 0) &&
+		!oidInExtensions(OIDExtensionSubjectInfoAccess, template.ExtraExtensions) {
+		ret[n].Id = OIDExtensionSubjectInfoAccess
+		var siaValues []accessDescription
+		for _, ts := range template.SubjectTimestamps {
+			siaValues = append(siaValues, accessDescription{
+				Method:   OIDSubjectInfoAccessTimestamp,
+				Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(ts)},
+			})
+		}
+		for _, repo := range template.SubjectCARepositories {
+			siaValues = append(siaValues, accessDescription{
+				Method:   OIDSubjectInfoAccessCARepo,
+				Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(repo)},
+			})
+		}
+		ret[n].Value, err = asn1.Marshal(siaValues)
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) &&
+		!oidInExtensions(OIDExtensionSubjectAltName, template.ExtraExtensions) {
+		ret[n].Id = OIDExtensionSubjectAltName
+		// From RFC 5280, Section 4.2.1.6:
+		// “If the subject field contains an empty sequence ... then
+		// subjectAltName extension ...
is marked as critical” + ret[n].Critical = subjectIsEmpty + ret[n].Value, err = marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses, template.URIs) + if err != nil { + return + } + n++ + } + + if len(template.PolicyIdentifiers) > 0 && + !oidInExtensions(OIDExtensionCertificatePolicies, template.ExtraExtensions) { + ret[n].Id = OIDExtensionCertificatePolicies + policies := make([]policyInformation, len(template.PolicyIdentifiers)) + for i, policy := range template.PolicyIdentifiers { + policies[i].Policy = policy + } + ret[n].Value, err = asn1.Marshal(policies) + if err != nil { + return + } + n++ + } + + if (len(template.PermittedDNSDomains) > 0 || len(template.ExcludedDNSDomains) > 0 || + len(template.PermittedIPRanges) > 0 || len(template.ExcludedIPRanges) > 0 || + len(template.PermittedEmailAddresses) > 0 || len(template.ExcludedEmailAddresses) > 0 || + len(template.PermittedURIDomains) > 0 || len(template.ExcludedURIDomains) > 0) && + !oidInExtensions(OIDExtensionNameConstraints, template.ExtraExtensions) { + ret[n].Id = OIDExtensionNameConstraints + ret[n].Critical = template.PermittedDNSDomainsCritical + + ipAndMask := func(ipNet *net.IPNet) []byte { + maskedIP := ipNet.IP.Mask(ipNet.Mask) + ipAndMask := make([]byte, 0, len(maskedIP)+len(ipNet.Mask)) + ipAndMask = append(ipAndMask, maskedIP...) + ipAndMask = append(ipAndMask, ipNet.Mask...) + return ipAndMask + } + + serialiseConstraints := func(dns []string, ips []*net.IPNet, emails []string, uriDomains []string) (der []byte, err error) { + var b cryptobyte.Builder + + for _, name := range dns { + if err = isIA5String(name); err != nil { + return nil, err + } + + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + b.AddASN1(cryptobyte_asn1.Tag(2).ContextSpecific(), func(b *cryptobyte.Builder) { + b.AddBytes([]byte(name)) + }) + }) + } + + for _, ipNet := range ips { + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + b.AddASN1(cryptobyte_asn1.Tag(7).ContextSpecific(), func(b *cryptobyte.Builder) { + b.AddBytes(ipAndMask(ipNet)) + }) + }) + } + + for _, email := range emails { + if err = isIA5String(email); err != nil { + return nil, err + } + + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + b.AddASN1(cryptobyte_asn1.Tag(1).ContextSpecific(), func(b *cryptobyte.Builder) { + b.AddBytes([]byte(email)) + }) + }) + } + + for _, uriDomain := range uriDomains { + if err = isIA5String(uriDomain); err != nil { + return nil, err + } + + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + b.AddASN1(cryptobyte_asn1.Tag(6).ContextSpecific(), func(b *cryptobyte.Builder) { + b.AddBytes([]byte(uriDomain)) + }) + }) + } + + return b.Bytes() + } + + permitted, err := serialiseConstraints(template.PermittedDNSDomains, template.PermittedIPRanges, template.PermittedEmailAddresses, template.PermittedURIDomains) + if err != nil { + return nil, err + } + + excluded, err := serialiseConstraints(template.ExcludedDNSDomains, template.ExcludedIPRanges, template.ExcludedEmailAddresses, template.ExcludedURIDomains) + if err != nil { + return nil, err + } + + var b cryptobyte.Builder + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + if len(permitted) > 0 { + b.AddASN1(cryptobyte_asn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { + b.AddBytes(permitted) + }) + } + + if len(excluded) > 0 { + b.AddASN1(cryptobyte_asn1.Tag(1).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { + b.AddBytes(excluded) + }) + } + }) + + 
ret[n].Value, err = b.Bytes() + if err != nil { + return nil, err + } + n++ + } + + if len(template.CRLDistributionPoints) > 0 && + !oidInExtensions(OIDExtensionCRLDistributionPoints, template.ExtraExtensions) { + ret[n].Id = OIDExtensionCRLDistributionPoints + + var crlDp []distributionPoint + for _, name := range template.CRLDistributionPoints { + dp := distributionPoint{ + DistributionPoint: distributionPointName{ + FullName: []asn1.RawValue{ + {Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)}, + }, + }, + } + crlDp = append(crlDp, dp) + } + + ret[n].Value, err = asn1.Marshal(crlDp) + if err != nil { + return + } + n++ + } + + if (len(template.RawSCT) > 0 || len(template.SCTList.SCTList) > 0) && !oidInExtensions(OIDExtensionCTSCT, template.ExtraExtensions) { + rawSCT := template.RawSCT + if len(template.SCTList.SCTList) > 0 { + rawSCT, err = tls.Marshal(template.SCTList) + if err != nil { + return + } + } + ret[n].Id = OIDExtensionCTSCT + ret[n].Value, err = asn1.Marshal(rawSCT) + if err != nil { + return + } + n++ + } + + // Adding another extension here? Remember to update the maximum number + // of elements in the make() at the top of the function and the list of + // template fields used in CreateCertificate documentation. + + return append(ret[:n], template.ExtraExtensions...), nil +} + +func subjectBytes(cert *Certificate) ([]byte, error) { + if len(cert.RawSubject) > 0 { + return cert.RawSubject, nil + } + + return asn1.Marshal(cert.Subject.ToRDNSequence()) +} + +// signingParamsForPublicKey returns the parameters to use for signing with +// priv. If requestedSigAlgo is not zero then it overrides the default +// signature algorithm. +func signingParamsForPublicKey(pub interface{}, requestedSigAlgo SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) { + var pubType PublicKeyAlgorithm + + switch pub := pub.(type) { + case *rsa.PublicKey: + pubType = RSA + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureSHA256WithRSA + sigAlgo.Parameters = asn1.NullRawValue + + case *ecdsa.PublicKey: + pubType = ECDSA + + switch pub.Curve { + case elliptic.P224(), elliptic.P256(): + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA256 + case elliptic.P384(): + hashFunc = crypto.SHA384 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA384 + case elliptic.P521(): + hashFunc = crypto.SHA512 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA512 + default: + err = errors.New("x509: unknown elliptic curve") + } + + case ed25519.PublicKey: + pubType = Ed25519 + sigAlgo.Algorithm = oidSignatureEd25519 + + default: + err = errors.New("x509: only RSA, ECDSA and Ed25519 keys supported") + } + + if err != nil { + return + } + + if requestedSigAlgo == 0 { + return + } + + found := false + for _, details := range signatureAlgorithmDetails { + if details.algo == requestedSigAlgo { + if details.pubKeyAlgo != pubType { + err = errors.New("x509: requested SignatureAlgorithm does not match private key type") + return + } + sigAlgo.Algorithm, hashFunc = details.oid, details.hash + if hashFunc == 0 && pubType != Ed25519 { + err = errors.New("x509: cannot sign with hash function requested") + return + } + if requestedSigAlgo.isRSAPSS() { + sigAlgo.Parameters = rsaPSSParameters(hashFunc) + } + found = true + break + } + } + + if !found { + err = errors.New("x509: unknown SignatureAlgorithm") + } + + return +} + +// emptyASN1Subject is the ASN.1 DER encoding of an empty Subject, which is +// just an empty SEQUENCE. 
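+// (0x30 is the universal SEQUENCE tag and 0 the zero length.)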
+var emptyASN1Subject = []byte{0x30, 0} + +// CreateCertificate creates a new X.509v3 certificate based on a template. +// The following members of template are used: +// - SerialNumber +// - Subject +// - NotBefore, NotAfter +// - SignatureAlgorithm +// - For extensions: +// - KeyUsage +// - ExtKeyUsage, UnknownExtKeyUsage +// - BasicConstraintsValid, IsCA, MaxPathLen, MaxPathLenZero +// - SubjectKeyId +// - AuthorityKeyId +// - OCSPServer, IssuingCertificateURL +// - SubjectTimestamps, SubjectCARepositories +// - DNSNames, EmailAddresses, IPAddresses, URIs +// - PolicyIdentifiers +// - ExcludedDNSDomains, ExcludedIPRanges, ExcludedEmailAddresses, ExcludedURIDomains, PermittedDNSDomainsCritical, +// PermittedDNSDomains, PermittedIPRanges, PermittedEmailAddresses, PermittedURIDomains +// - CRLDistributionPoints +// - RawSCT, SCTList +// - ExtraExtensions +// +// The certificate is signed by parent. If parent is equal to template then the +// certificate is self-signed. The parameter pub is the public key of the +// signee and priv is the private key of the signer. +// +// The returned slice is the certificate in DER encoding. +// +// The currently supported key types are *rsa.PublicKey, *ecdsa.PublicKey and +// ed25519.PublicKey. pub must be a supported key type, and priv must be a +// crypto.Signer with a supported public key. +// +// The AuthorityKeyId will be taken from the SubjectKeyId of parent, if any, +// unless the resulting certificate is self-signed. Otherwise the value from +// template will be used. +func CreateCertificate(rand io.Reader, template, parent *Certificate, pub, priv interface{}) (cert []byte, err error) { + key, ok := priv.(crypto.Signer) + if !ok { + return nil, errors.New("x509: certificate private key does not implement crypto.Signer") + } + + if template.SerialNumber == nil { + return nil, errors.New("x509: no SerialNumber given") + } + + hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(key.Public(), template.SignatureAlgorithm) + if err != nil { + return nil, err + } + + publicKeyBytes, publicKeyAlgorithm, err := marshalPublicKey(pub) + if err != nil { + return nil, err + } + + asn1Issuer, err := subjectBytes(parent) + if err != nil { + return + } + + asn1Subject, err := subjectBytes(template) + if err != nil { + return + } + + authorityKeyId := template.AuthorityKeyId + if !bytes.Equal(asn1Issuer, asn1Subject) && len(parent.SubjectKeyId) > 0 { + authorityKeyId = parent.SubjectKeyId + } + + extensions, err := buildExtensions(template, bytes.Equal(asn1Subject, emptyASN1Subject), authorityKeyId) + if err != nil { + return + } + + encodedPublicKey := asn1.BitString{BitLength: len(publicKeyBytes) * 8, Bytes: publicKeyBytes} + c := tbsCertificate{ + Version: 2, + SerialNumber: template.SerialNumber, + SignatureAlgorithm: signatureAlgorithm, + Issuer: asn1.RawValue{FullBytes: asn1Issuer}, + Validity: validity{template.NotBefore.UTC(), template.NotAfter.UTC()}, + Subject: asn1.RawValue{FullBytes: asn1Subject}, + PublicKey: publicKeyInfo{nil, publicKeyAlgorithm, encodedPublicKey}, + Extensions: extensions, + } + + tbsCertContents, err := asn1.Marshal(c) + if err != nil { + return + } + c.Raw = tbsCertContents + + signed := tbsCertContents + if hashFunc != 0 { + h := hashFunc.New() + h.Write(signed) + signed = h.Sum(nil) + } + + var signerOpts crypto.SignerOpts = hashFunc + if template.SignatureAlgorithm != 0 && template.SignatureAlgorithm.isRSAPSS() { + signerOpts = &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + Hash: hashFunc, + } + } + + 
var signature []byte + signature, err = key.Sign(rand, signed, signerOpts) + if err != nil { + return + } + + return asn1.Marshal(certificate{ + nil, + c, + signatureAlgorithm, + asn1.BitString{Bytes: signature, BitLength: len(signature) * 8}, + }) +} + +// pemCRLPrefix is the magic string that indicates that we have a PEM encoded +// CRL. +var pemCRLPrefix = []byte("-----BEGIN X509 CRL") + +// pemType is the type of a PEM encoded CRL. +var pemType = "X509 CRL" + +// ParseCRL parses a CRL from the given bytes. It's often the case that PEM +// encoded CRLs will appear where they should be DER encoded, so this function +// will transparently handle PEM encoding as long as there isn't any leading +// garbage. +func ParseCRL(crlBytes []byte) (*pkix.CertificateList, error) { + if bytes.HasPrefix(crlBytes, pemCRLPrefix) { + block, _ := pem.Decode(crlBytes) + if block != nil && block.Type == pemType { + crlBytes = block.Bytes + } + } + return ParseDERCRL(crlBytes) +} + +// ParseDERCRL parses a DER encoded CRL from the given bytes. +func ParseDERCRL(derBytes []byte) (*pkix.CertificateList, error) { + certList := new(pkix.CertificateList) + if rest, err := asn1.Unmarshal(derBytes, certList); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after CRL") + } + return certList, nil +} + +// CreateCRL returns a DER encoded CRL, signed by this Certificate, that +// contains the given list of revoked certificates. +func (c *Certificate) CreateCRL(rand io.Reader, priv interface{}, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) { + key, ok := priv.(crypto.Signer) + if !ok { + return nil, errors.New("x509: certificate private key does not implement crypto.Signer") + } + + hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(key.Public(), 0) + if err != nil { + return nil, err + } + + // Force revocation times to UTC per RFC 5280. + revokedCertsUTC := make([]pkix.RevokedCertificate, len(revokedCerts)) + for i, rc := range revokedCerts { + rc.RevocationTime = rc.RevocationTime.UTC() + revokedCertsUTC[i] = rc + } + + tbsCertList := pkix.TBSCertificateList{ + Version: 1, + Signature: signatureAlgorithm, + Issuer: c.Subject.ToRDNSequence(), + ThisUpdate: now.UTC(), + NextUpdate: expiry.UTC(), + RevokedCertificates: revokedCertsUTC, + } + + // Authority Key Id + if len(c.SubjectKeyId) > 0 { + var aki pkix.Extension + aki.Id = OIDExtensionAuthorityKeyId + aki.Value, err = asn1.Marshal(authKeyId{Id: c.SubjectKeyId}) + if err != nil { + return + } + tbsCertList.Extensions = append(tbsCertList.Extensions, aki) + } + + tbsCertListContents, err := asn1.Marshal(tbsCertList) + if err != nil { + return + } + + signed := tbsCertListContents + if hashFunc != 0 { + h := hashFunc.New() + h.Write(signed) + signed = h.Sum(nil) + } + + var signature []byte + signature, err = key.Sign(rand, signed, hashFunc) + if err != nil { + return + } + + return asn1.Marshal(pkix.CertificateList{ + TBSCertList: tbsCertList, + SignatureAlgorithm: signatureAlgorithm, + SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8}, + }) +} + +// CertificateRequest represents a PKCS #10, certificate signature request. +type CertificateRequest struct { + Raw []byte // Complete ASN.1 DER content (CSR, signature algorithm and signature). + RawTBSCertificateRequest []byte // Certificate request info part of raw ASN.1 DER content. + RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo. 
+ RawSubject []byte // DER encoded Subject. + + Version int + Signature []byte + SignatureAlgorithm SignatureAlgorithm + + PublicKeyAlgorithm PublicKeyAlgorithm + PublicKey interface{} + + Subject pkix.Name + + // Attributes contains the CSR attributes that can parse as + // pkix.AttributeTypeAndValueSET. + // + // Deprecated: Use Extensions and ExtraExtensions instead for parsing and + // generating the requestedExtensions attribute. + Attributes []pkix.AttributeTypeAndValueSET + + // Extensions contains all requested extensions, in raw form. When parsing + // CSRs, this can be used to extract extensions that are not parsed by this + // package. + Extensions []pkix.Extension + + // ExtraExtensions contains extensions to be copied, raw, into any CSR + // marshaled by CreateCertificateRequest. Values override any extensions + // that would otherwise be produced based on the other fields but are + // overridden by any extensions specified in Attributes. + // + // The ExtraExtensions field is not populated by ParseCertificateRequest, + // see Extensions instead. + ExtraExtensions []pkix.Extension + + // Subject Alternate Name values. + DNSNames []string + EmailAddresses []string + IPAddresses []net.IP + URIs []*url.URL +} + +// These structures reflect the ASN.1 structure of X.509 certificate +// signature requests (see RFC 2986): + +type tbsCertificateRequest struct { + Raw asn1.RawContent + Version int + Subject asn1.RawValue + PublicKey publicKeyInfo + RawAttributes []asn1.RawValue `asn1:"tag:0"` +} + +type certificateRequest struct { + Raw asn1.RawContent + TBSCSR tbsCertificateRequest + SignatureAlgorithm pkix.AlgorithmIdentifier + SignatureValue asn1.BitString +} + +// oidExtensionRequest is a PKCS#9 OBJECT IDENTIFIER that indicates requested +// extensions in a CSR. +var oidExtensionRequest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 14} + +// newRawAttributes converts AttributeTypeAndValueSETs from a template +// CertificateRequest's Attributes into tbsCertificateRequest RawAttributes. +func newRawAttributes(attributes []pkix.AttributeTypeAndValueSET) ([]asn1.RawValue, error) { + var rawAttributes []asn1.RawValue + b, err := asn1.Marshal(attributes) + if err != nil { + return nil, err + } + rest, err := asn1.Unmarshal(b, &rawAttributes) + if err != nil { + return nil, err + } + if len(rest) != 0 { + return nil, errors.New("x509: failed to unmarshal raw CSR Attributes") + } + return rawAttributes, nil +} + +// parseRawAttributes Unmarshals RawAttributes into AttributeTypeAndValueSETs. +func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndValueSET { + var attributes []pkix.AttributeTypeAndValueSET + for _, rawAttr := range rawAttributes { + var attr pkix.AttributeTypeAndValueSET + rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr) + // Ignore attributes that don't parse into pkix.AttributeTypeAndValueSET + // (i.e.: challengePassword or unstructuredName). + if err == nil && len(rest) == 0 { + attributes = append(attributes, attr) + } + } + return attributes +} + +// parseCSRExtensions parses the attributes from a CSR and extracts any +// requested extensions. +func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error) { + // pkcs10Attribute reflects the Attribute structure from RFC 2986, Section 4.1. 
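+	//
+	// In slightly simplified form:
+	//
+	//	Attribute ::= SEQUENCE {
+	//		type   OBJECT IDENTIFIER,
+	//		values SET SIZE(1..MAX) OF ANY }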
+ type pkcs10Attribute struct { + Id asn1.ObjectIdentifier + Values []asn1.RawValue `asn1:"set"` + } + + var ret []pkix.Extension + for _, rawAttr := range rawAttributes { + var attr pkcs10Attribute + if rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr); err != nil || len(rest) != 0 || len(attr.Values) == 0 { + // Ignore attributes that don't parse. + continue + } + + if !attr.Id.Equal(oidExtensionRequest) { + continue + } + + var extensions []pkix.Extension + if _, err := asn1.Unmarshal(attr.Values[0].FullBytes, &extensions); err != nil { + return nil, err + } + ret = append(ret, extensions...) + } + + return ret, nil +} + +// CreateCertificateRequest creates a new certificate request based on a +// template. The following members of template are used: +// +// - SignatureAlgorithm +// - Subject +// - DNSNames +// - EmailAddresses +// - IPAddresses +// - URIs +// - ExtraExtensions +// - Attributes (deprecated) +// +// priv is the private key to sign the CSR with, and the corresponding public +// key will be included in the CSR. It must implement crypto.Signer and its +// Public() method must return a *rsa.PublicKey or a *ecdsa.PublicKey or a +// ed25519.PublicKey. (A *rsa.PrivateKey, *ecdsa.PrivateKey or +// ed25519.PrivateKey satisfies this.) +// +// The returned slice is the certificate request in DER encoding. +func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv interface{}) (csr []byte, err error) { + key, ok := priv.(crypto.Signer) + if !ok { + return nil, errors.New("x509: certificate private key does not implement crypto.Signer") + } + + var hashFunc crypto.Hash + var sigAlgo pkix.AlgorithmIdentifier + hashFunc, sigAlgo, err = signingParamsForPublicKey(key.Public(), template.SignatureAlgorithm) + if err != nil { + return nil, err + } + + var publicKeyBytes []byte + var publicKeyAlgorithm pkix.AlgorithmIdentifier + publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(key.Public()) + if err != nil { + return nil, err + } + + var extensions []pkix.Extension + + if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) && + !oidInExtensions(OIDExtensionSubjectAltName, template.ExtraExtensions) { + sanBytes, err := marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses, template.URIs) + if err != nil { + return nil, err + } + + extensions = append(extensions, pkix.Extension{ + Id: OIDExtensionSubjectAltName, + Value: sanBytes, + }) + } + + extensions = append(extensions, template.ExtraExtensions...) + + // Make a copy of template.Attributes because we may alter it below. + attributes := make([]pkix.AttributeTypeAndValueSET, 0, len(template.Attributes)) + for _, attr := range template.Attributes { + values := make([][]pkix.AttributeTypeAndValue, len(attr.Value)) + copy(values, attr.Value) + attributes = append(attributes, pkix.AttributeTypeAndValueSET{ + Type: attr.Type, + Value: values, + }) + } + + extensionsAppended := false + if len(extensions) > 0 { + // Append the extensions to an existing attribute if possible. + for _, atvSet := range attributes { + if !atvSet.Type.Equal(oidExtensionRequest) || len(atvSet.Value) == 0 { + continue + } + + // specifiedExtensions contains all the extensions that we + // found specified via template.Attributes. 
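+			// Extensions recorded here take priority: the dedup loop below
+			// skips any entry from `extensions` whose OID is already present
+			// in this map.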
+ specifiedExtensions := make(map[string]bool) + + for _, atvs := range atvSet.Value { + for _, atv := range atvs { + specifiedExtensions[atv.Type.String()] = true + } + } + + newValue := make([]pkix.AttributeTypeAndValue, 0, len(atvSet.Value[0])+len(extensions)) + newValue = append(newValue, atvSet.Value[0]...) + + for _, e := range extensions { + if specifiedExtensions[e.Id.String()] { + // Attributes already contained a value for + // this extension and it takes priority. + continue + } + + newValue = append(newValue, pkix.AttributeTypeAndValue{ + // There is no place for the critical + // flag in an AttributeTypeAndValue. + Type: e.Id, + Value: e.Value, + }) + } + + atvSet.Value[0] = newValue + extensionsAppended = true + break + } + } + + rawAttributes, err := newRawAttributes(attributes) + if err != nil { + return + } + + // If not included in attributes, add a new attribute for the + // extensions. + if len(extensions) > 0 && !extensionsAppended { + attr := struct { + Type asn1.ObjectIdentifier + Value [][]pkix.Extension `asn1:"set"` + }{ + Type: oidExtensionRequest, + Value: [][]pkix.Extension{extensions}, + } + + b, err := asn1.Marshal(attr) + if err != nil { + return nil, errors.New("x509: failed to serialise extensions attribute: " + err.Error()) + } + + var rawValue asn1.RawValue + if _, err := asn1.Unmarshal(b, &rawValue); err != nil { + return nil, err + } + + rawAttributes = append(rawAttributes, rawValue) + } + + asn1Subject := template.RawSubject + if len(asn1Subject) == 0 { + asn1Subject, err = asn1.Marshal(template.Subject.ToRDNSequence()) + if err != nil { + return nil, err + } + } + + tbsCSR := tbsCertificateRequest{ + Version: 0, // PKCS #10, RFC 2986 + Subject: asn1.RawValue{FullBytes: asn1Subject}, + PublicKey: publicKeyInfo{ + Algorithm: publicKeyAlgorithm, + PublicKey: asn1.BitString{ + Bytes: publicKeyBytes, + BitLength: len(publicKeyBytes) * 8, + }, + }, + RawAttributes: rawAttributes, + } + + tbsCSRContents, err := asn1.Marshal(tbsCSR) + if err != nil { + return + } + tbsCSR.Raw = tbsCSRContents + + signed := tbsCSRContents + if hashFunc != 0 { + h := hashFunc.New() + h.Write(signed) + signed = h.Sum(nil) + } + + var signature []byte + signature, err = key.Sign(rand, signed, hashFunc) + if err != nil { + return + } + + return asn1.Marshal(certificateRequest{ + TBSCSR: tbsCSR, + SignatureAlgorithm: sigAlgo, + SignatureValue: asn1.BitString{ + Bytes: signature, + BitLength: len(signature) * 8, + }, + }) +} + +// ParseCertificateRequest parses a single certificate request from the +// given ASN.1 DER data. 
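+//
+// A minimal round-trip sketch, assuming priv is a crypto.Signer (for example
+// an *ecdsa.PrivateKey) and with error handling elided:
+//
+//	der, _ := CreateCertificateRequest(rand.Reader, &CertificateRequest{
+//		Subject:  pkix.Name{CommonName: "example.com"},
+//		DNSNames: []string{"example.com"},
+//	}, priv)
+//	csr, _ := ParseCertificateRequest(der)
+//	err := csr.CheckSignature()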
+func ParseCertificateRequest(asn1Data []byte) (*CertificateRequest, error) { + var csr certificateRequest + + rest, err := asn1.Unmarshal(asn1Data, &csr) + if err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + + return parseCertificateRequest(&csr) +} + +func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error) { + out := &CertificateRequest{ + Raw: in.Raw, + RawTBSCertificateRequest: in.TBSCSR.Raw, + RawSubjectPublicKeyInfo: in.TBSCSR.PublicKey.Raw, + RawSubject: in.TBSCSR.Subject.FullBytes, + + Signature: in.SignatureValue.RightAlign(), + SignatureAlgorithm: SignatureAlgorithmFromAI(in.SignatureAlgorithm), + + PublicKeyAlgorithm: getPublicKeyAlgorithmFromOID(in.TBSCSR.PublicKey.Algorithm.Algorithm), + + Version: in.TBSCSR.Version, + Attributes: parseRawAttributes(in.TBSCSR.RawAttributes), + } + + var err error + var nfe NonFatalErrors + out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey, &nfe) + if err != nil { + return nil, err + } + // Treat non-fatal errors as fatal here. + if len(nfe.Errors) > 0 { + return nil, nfe.Errors[0] + } + + var subject pkix.RDNSequence + if rest, err := asn1.Unmarshal(in.TBSCSR.Subject.FullBytes, &subject); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 Subject") + } + + out.Subject.FillFromRDNSequence(&subject) + + if out.Extensions, err = parseCSRExtensions(in.TBSCSR.RawAttributes); err != nil { + return nil, err + } + + for _, extension := range out.Extensions { + if extension.Id.Equal(OIDExtensionSubjectAltName) { + out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(extension.Value, &nfe) + if err != nil { + return nil, err + } + } + } + + return out, nil +} + +// CheckSignature reports whether the signature on c is valid. +func (c *CertificateRequest) CheckSignature() error { + return checkSignature(c.SignatureAlgorithm, c.RawTBSCertificateRequest, c.Signature, c.PublicKey) +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509util/files.go b/vendor/github.com/google/certificate-transparency-go/x509util/files.go new file mode 100644 index 00000000000..823ac7375a9 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509util/files.go @@ -0,0 +1,116 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package x509util + +import ( + "encoding/pem" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + + "github.com/google/certificate-transparency-go/x509" +) + +// ReadPossiblePEMFile loads data from a file which may be in DER format +// or may be in PEM format (with the given blockname). 
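+// If no "BEGIN <blockname>" marker is found, the raw file contents are
+// returned unchanged as a single DER blob. A minimal sketch (the filename is
+// hypothetical):
+//
+//	ders, err := x509util.ReadPossiblePEMFile("roots.pem", "CERTIFICATE")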
+func ReadPossiblePEMFile(filename, blockname string) ([][]byte, error) { + data, err := os.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("%s: failed to read data: %v", filename, err) + } + return dePEM(data, blockname), nil +} + +// ReadPossiblePEMURL attempts to determine if the given target is a local file or a +// URL, and return the file contents regardless. It also copes with either PEM or DER +// format data. +func ReadPossiblePEMURL(target, blockname string) ([][]byte, error) { + if !strings.HasPrefix(target, "http://") && !strings.HasPrefix(target, "https://") { + // Assume it's a filename + return ReadPossiblePEMFile(target, blockname) + } + + rsp, err := http.Get(target) + if err != nil { + return nil, fmt.Errorf("failed to http.Get(%q): %v", target, err) + } + data, err := io.ReadAll(rsp.Body) + if err != nil { + return nil, fmt.Errorf("failed to io.ReadAll(%q): %v", target, err) + } + return dePEM(data, blockname), nil +} + +func dePEM(data []byte, blockname string) [][]byte { + var results [][]byte + if strings.Contains(string(data), "BEGIN "+blockname) { + rest := data + for { + var block *pem.Block + block, rest = pem.Decode(rest) + if block == nil { + break + } + if block.Type == blockname { + results = append(results, block.Bytes) + } + } + } else { + results = append(results, data) + } + return results +} + +// ReadFileOrURL returns the data from a target which may be either a filename +// or an HTTP(S) URL. +func ReadFileOrURL(target string, client *http.Client) ([]byte, error) { + u, err := url.Parse(target) + if err != nil || (u.Scheme != "http" && u.Scheme != "https") { + return os.ReadFile(target) + } + + rsp, err := client.Get(u.String()) + if err != nil { + return nil, fmt.Errorf("failed to http.Get(%q): %v", target, err) + } + return io.ReadAll(rsp.Body) +} + +// GetIssuer attempts to retrieve the issuer for a certificate, by examining +// the cert's Authority Information Access extension (if present) for the +// issuer's URL and retrieving from there. +func GetIssuer(cert *x509.Certificate, client *http.Client) (*x509.Certificate, error) { + if len(cert.IssuingCertificateURL) == 0 { + return nil, nil + } + issuerURL := cert.IssuingCertificateURL[0] + rsp, err := client.Get(issuerURL) + if err != nil || rsp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to get issuer from %q: %v", issuerURL, err) + } + defer rsp.Body.Close() + body, err := io.ReadAll(rsp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read issuer from %q: %v", issuerURL, err) + } + issuers, err := x509.ParseCertificates(body) + if err != nil { + return nil, fmt.Errorf("failed to parse issuer cert: %v", err) + } + return issuers[0], nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509util/pem_cert_pool.go b/vendor/github.com/google/certificate-transparency-go/x509util/pem_cert_pool.go new file mode 100644 index 00000000000..c21bd650589 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509util/pem_cert_pool.go @@ -0,0 +1,118 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package x509util + +import ( + "crypto/sha256" + "encoding/pem" + "errors" + "fmt" + "os" + + "github.com/google/certificate-transparency-go/x509" +) + +// String for certificate blocks in BEGIN / END PEM headers +const pemCertificateBlockType string = "CERTIFICATE" + +// PEMCertPool is a wrapper / extension to x509.CertPool. It allows us to access the +// raw certs, which we need to serve get-roots request and has stricter handling on loading +// certs into the pool. CertPool ignores errors if at least one cert loads correctly but +// PEMCertPool requires all certs to load. +type PEMCertPool struct { + // maps from sha-256 to certificate, used for dup detection + fingerprintToCertMap map[[sha256.Size]byte]x509.Certificate + rawCerts []*x509.Certificate + certPool *x509.CertPool +} + +// NewPEMCertPool creates a new, empty, instance of PEMCertPool. +func NewPEMCertPool() *PEMCertPool { + return &PEMCertPool{fingerprintToCertMap: make(map[[sha256.Size]byte]x509.Certificate), certPool: x509.NewCertPool()} +} + +// AddCert adds a certificate to a pool. Uses fingerprint to weed out duplicates. +// cert must not be nil. +func (p *PEMCertPool) AddCert(cert *x509.Certificate) { + fingerprint := sha256.Sum256(cert.Raw) + _, ok := p.fingerprintToCertMap[fingerprint] + + if !ok { + p.fingerprintToCertMap[fingerprint] = *cert + p.certPool.AddCert(cert) + p.rawCerts = append(p.rawCerts, cert) + } +} + +// Included indicates whether the given cert is included in the pool. +func (p *PEMCertPool) Included(cert *x509.Certificate) bool { + fingerprint := sha256.Sum256(cert.Raw) + _, ok := p.fingerprintToCertMap[fingerprint] + return ok +} + +// AppendCertsFromPEM adds certs to the pool from a byte slice assumed to contain PEM encoded data. +// Skips over non certificate blocks in the data. Returns true if all certificates in the +// data were parsed and added to the pool successfully and at least one certificate was found. +func (p *PEMCertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) { + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + if block.Type != pemCertificateBlockType || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if x509.IsFatal(err) { + return false + } + + p.AddCert(cert) + ok = true + } + + return +} + +// AppendCertsFromPEMFile adds certs from a file that contains concatenated PEM data. +func (p *PEMCertPool) AppendCertsFromPEMFile(pemFile string) error { + pemData, err := os.ReadFile(pemFile) + if err != nil { + return fmt.Errorf("failed to load PEM certs file: %v", err) + } + + if !p.AppendCertsFromPEM(pemData) { + return errors.New("failed to parse PEM certs file") + } + return nil +} + +// Subjects returns a list of the DER-encoded subjects of all of the certificates in the pool. +func (p *PEMCertPool) Subjects() (res [][]byte) { + return p.certPool.Subjects() +} + +// CertPool returns the underlying CertPool. 
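+// The returned pool is the same instance that AddCert and AppendCertsFromPEM
+// populate, so it can be used directly, e.g. as the Roots field of
+// x509.VerifyOptions.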
+func (p *PEMCertPool) CertPool() *x509.CertPool { + return p.certPool +} + +// RawCertificates returns a list of the raw bytes of certificates that are in this pool +func (p *PEMCertPool) RawCertificates() []*x509.Certificate { + return p.rawCerts +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509util/revoked.go b/vendor/github.com/google/certificate-transparency-go/x509util/revoked.go new file mode 100644 index 00000000000..e0927292630 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509util/revoked.go @@ -0,0 +1,169 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package x509util + +import ( + "bytes" + "encoding/hex" + "fmt" + "strconv" + + "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509/pkix" +) + +// RevocationReasonToString generates a string describing a revocation reason code. +func RevocationReasonToString(reason x509.RevocationReasonCode) string { + switch reason { + case x509.Unspecified: + return "Unspecified" + case x509.KeyCompromise: + return "Key Compromise" + case x509.CACompromise: + return "CA Compromise" + case x509.AffiliationChanged: + return "Affiliation Changed" + case x509.Superseded: + return "Superseded" + case x509.CessationOfOperation: + return "Cessation Of Operation" + case x509.CertificateHold: + return "Certificate Hold" + case x509.RemoveFromCRL: + return "Remove From CRL" + case x509.PrivilegeWithdrawn: + return "Privilege Withdrawn" + case x509.AACompromise: + return "AA Compromise" + default: + return strconv.Itoa(int(reason)) + } +} + +// CRLToString generates a string describing the given certificate revocation list. +// The output roughly resembles that from openssl crl -text. 
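+//
+// A minimal sketch of typical use (the file path is hypothetical, and
+// ParseCRL accepts either PEM or DER input):
+//
+//	der, _ := os.ReadFile("list.crl")
+//	crl, err := x509.ParseCRL(der)
+//	if err == nil {
+//		fmt.Print(x509util.CRLToString(crl))
+//	}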
+func CRLToString(crl *x509.CertificateList) string { + var result bytes.Buffer + var showCritical = func(critical bool) { + if critical { + result.WriteString(" critical") + } + result.WriteString("\n") + } + result.WriteString("Certificate Revocation List (CRL):\n") + result.WriteString(fmt.Sprintf(" Version: %d (%#x)\n", crl.TBSCertList.Version+1, crl.TBSCertList.Version)) + result.WriteString(fmt.Sprintf(" Signature Algorithm: %v\n", x509.SignatureAlgorithmFromAI(crl.TBSCertList.Signature))) + var issuer pkix.Name + issuer.FillFromRDNSequence(&crl.TBSCertList.Issuer) + result.WriteString(fmt.Sprintf(" Issuer: %v\n", NameToString(issuer))) + result.WriteString(fmt.Sprintf(" Last Update: %v\n", crl.TBSCertList.ThisUpdate)) + result.WriteString(fmt.Sprintf(" Next Update: %v\n", crl.TBSCertList.NextUpdate)) + + if len(crl.TBSCertList.Extensions) > 0 { + result.WriteString(" CRL extensions:\n") + } + + count, critical := OIDInExtensions(x509.OIDExtensionAuthorityKeyId, crl.TBSCertList.Extensions) + if count > 0 { + result.WriteString(" X509v3 Authority Key Identifier:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" keyid:%v\n", hex.EncodeToString(crl.TBSCertList.AuthorityKeyID))) + } + count, critical = OIDInExtensions(x509.OIDExtensionIssuerAltName, crl.TBSCertList.Extensions) + if count > 0 { + result.WriteString(" X509v3 Issuer Alt Name:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" %s\n", GeneralNamesToString(&crl.TBSCertList.IssuerAltNames))) + } + count, critical = OIDInExtensions(x509.OIDExtensionCRLNumber, crl.TBSCertList.Extensions) + if count > 0 { + result.WriteString(" X509v3 CRLNumber:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" %d\n", crl.TBSCertList.CRLNumber)) + } + count, critical = OIDInExtensions(x509.OIDExtensionDeltaCRLIndicator, crl.TBSCertList.Extensions) + if count > 0 { + result.WriteString(" X509v3 Delta CRL Indicator:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" %d\n", crl.TBSCertList.BaseCRLNumber)) + } + count, critical = OIDInExtensions(x509.OIDExtensionIssuingDistributionPoint, crl.TBSCertList.Extensions) + if count > 0 { + result.WriteString(" X509v3 Issuing Distribution Point:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" %s\n", GeneralNamesToString(&crl.TBSCertList.IssuingDPFullNames))) + } + count, critical = OIDInExtensions(x509.OIDExtensionFreshestCRL, crl.TBSCertList.Extensions) + if count > 0 { + result.WriteString(" X509v3 Freshest CRL:") + showCritical(critical) + result.WriteString(" Full Name:\n") + var buf bytes.Buffer + for _, pt := range crl.TBSCertList.FreshestCRLDistributionPoint { + commaAppend(&buf, "URI:"+pt) + } + result.WriteString(fmt.Sprintf(" %v\n", buf.String())) + } + count, critical = OIDInExtensions(x509.OIDExtensionAuthorityInfoAccess, crl.TBSCertList.Extensions) + if count > 0 { + result.WriteString(" Authority Information Access:") + showCritical(critical) + var issuerBuf bytes.Buffer + for _, issuer := range crl.TBSCertList.IssuingCertificateURL { + commaAppend(&issuerBuf, "URI:"+issuer) + } + if issuerBuf.Len() > 0 { + result.WriteString(fmt.Sprintf(" CA Issuers - %v\n", issuerBuf.String())) + } + var ocspBuf bytes.Buffer + for _, ocsp := range crl.TBSCertList.OCSPServer { + commaAppend(&ocspBuf, "URI:"+ocsp) + } + if ocspBuf.Len() > 0 { + result.WriteString(fmt.Sprintf(" OCSP - %v\n", ocspBuf.String())) + } + // TODO(drysdale): Display other GeneralName types + } + + result.WriteString("\n") + result.WriteString("Revoked Certificates:\n") + for 
_, c := range crl.TBSCertList.RevokedCertificates { + result.WriteString(fmt.Sprintf(" Serial Number: %s (0x%s)\n", c.SerialNumber.Text(10), c.SerialNumber.Text(16))) + result.WriteString(fmt.Sprintf(" Revocation Date : %v\n", c.RevocationTime)) + count, critical = OIDInExtensions(x509.OIDExtensionCRLReasons, c.Extensions) + if count > 0 { + result.WriteString(" X509v3 CRL Reason Code:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" %s\n", RevocationReasonToString(c.RevocationReason))) + } + count, critical = OIDInExtensions(x509.OIDExtensionInvalidityDate, c.Extensions) + if count > 0 { + result.WriteString(" Invalidity Date:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" %s\n", c.InvalidityDate)) + } + count, critical = OIDInExtensions(x509.OIDExtensionCertificateIssuer, c.Extensions) + if count > 0 { + result.WriteString(" Issuer:") + showCritical(critical) + result.WriteString(fmt.Sprintf(" %s\n", GeneralNamesToString(&c.Issuer))) + } + } + result.WriteString(fmt.Sprintf(" Signature Algorithm: %v\n", x509.SignatureAlgorithmFromAI(crl.SignatureAlgorithm))) + appendHexData(&result, crl.SignatureValue.Bytes, 18, " ") + result.WriteString("\n") + + return result.String() +} diff --git a/vendor/github.com/google/certificate-transparency-go/x509util/x509util.go b/vendor/github.com/google/certificate-transparency-go/x509util/x509util.go new file mode 100644 index 00000000000..d3c20e1aa9e --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509util/x509util.go @@ -0,0 +1,900 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package x509util includes utility code for working with X.509 +// certificates from the x509 package. +package x509util + +import ( + "bytes" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "net" + "strconv" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/asn1" + "github.com/google/certificate-transparency-go/gossip/minimal/x509ext" + "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509/pkix" +) + +// OIDForStandardExtension indicates whether oid identifies a standard extension. +// Standard extensions are listed in RFC 5280 (and other RFCs). 
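+// For example, the subject alternative name extension is standard:
+//
+//	ok := x509util.OIDForStandardExtension(x509.OIDExtensionSubjectAltName) // true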
+func OIDForStandardExtension(oid asn1.ObjectIdentifier) bool { + if oid.Equal(x509.OIDExtensionSubjectKeyId) || + oid.Equal(x509.OIDExtensionKeyUsage) || + oid.Equal(x509.OIDExtensionExtendedKeyUsage) || + oid.Equal(x509.OIDExtensionAuthorityKeyId) || + oid.Equal(x509.OIDExtensionBasicConstraints) || + oid.Equal(x509.OIDExtensionSubjectAltName) || + oid.Equal(x509.OIDExtensionCertificatePolicies) || + oid.Equal(x509.OIDExtensionNameConstraints) || + oid.Equal(x509.OIDExtensionCRLDistributionPoints) || + oid.Equal(x509.OIDExtensionIssuerAltName) || + oid.Equal(x509.OIDExtensionSubjectDirectoryAttributes) || + oid.Equal(x509.OIDExtensionInhibitAnyPolicy) || + oid.Equal(x509.OIDExtensionPolicyConstraints) || + oid.Equal(x509.OIDExtensionPolicyMappings) || + oid.Equal(x509.OIDExtensionFreshestCRL) || + oid.Equal(x509.OIDExtensionSubjectInfoAccess) || + oid.Equal(x509.OIDExtensionAuthorityInfoAccess) || + oid.Equal(x509.OIDExtensionIPPrefixList) || + oid.Equal(x509.OIDExtensionASList) || + oid.Equal(x509.OIDExtensionCTPoison) || + oid.Equal(x509.OIDExtensionCTSCT) { + return true + } + return false +} + +// OIDInExtensions checks whether the extension identified by oid is present in extensions +// and returns how many times it occurs together with an indication of whether any of them +// are marked critical. +func OIDInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) (int, bool) { + count := 0 + critical := false + for _, ext := range extensions { + if ext.Id.Equal(oid) { + count++ + if ext.Critical { + critical = true + } + } + } + return count, critical +} + +// String formatting for various X.509/ASN.1 types +func bitStringToString(b asn1.BitString) string { // nolint:deadcode,unused + result := hex.EncodeToString(b.Bytes) + bitsLeft := b.BitLength % 8 + if bitsLeft != 0 { + result += " (" + strconv.Itoa(8-bitsLeft) + " unused bits)" + } + return result +} + +func publicKeyAlgorithmToString(algo x509.PublicKeyAlgorithm) string { + // Use OpenSSL-compatible strings for the algorithms. + switch algo { + case x509.RSA: + return "rsaEncryption" + case x509.DSA: + return "dsaEncryption" + case x509.ECDSA: + return "id-ecPublicKey" + default: + return strconv.Itoa(int(algo)) + } +} + +// appendHexData adds a hex dump of binary data to buf, with line breaks +// after each set of count bytes, and with each new line prefixed with the +// given prefix. 
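+// For example, appendHexData(&buf, []byte{0x01, 0x02, 0x03}, 2, "  ") writes:
+//
+//	  01:02:
+//	  03: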
+func appendHexData(buf *bytes.Buffer, data []byte, count int, prefix string) { + for ii, b := range data { + if ii%count == 0 { + if ii > 0 { + buf.WriteString("\n") + } + buf.WriteString(prefix) + } + buf.WriteString(fmt.Sprintf("%02x:", b)) + } +} + +func curveOIDToString(oid asn1.ObjectIdentifier) (t string, bitlen int) { + switch { + case oid.Equal(x509.OIDNamedCurveP224): + return "secp224r1", 224 + case oid.Equal(x509.OIDNamedCurveP256): + return "prime256v1", 256 + case oid.Equal(x509.OIDNamedCurveP384): + return "secp384r1", 384 + case oid.Equal(x509.OIDNamedCurveP521): + return "secp521r1", 521 + case oid.Equal(x509.OIDNamedCurveP192): + return "secp192r1", 192 + } + return fmt.Sprintf("%v", oid), -1 +} + +func publicKeyToString(_ x509.PublicKeyAlgorithm, pub interface{}) string { + var buf bytes.Buffer + switch pub := pub.(type) { + case *rsa.PublicKey: + bitlen := pub.N.BitLen() + buf.WriteString(fmt.Sprintf(" Public Key: (%d bit)\n", bitlen)) + buf.WriteString(" Modulus:\n") + data := pub.N.Bytes() + appendHexData(&buf, data, 15, " ") + buf.WriteString("\n") + buf.WriteString(fmt.Sprintf(" Exponent: %d (0x%x)", pub.E, pub.E)) + case *dsa.PublicKey: + buf.WriteString(" pub:\n") + appendHexData(&buf, pub.Y.Bytes(), 15, " ") + buf.WriteString("\n") + buf.WriteString(" P:\n") + appendHexData(&buf, pub.P.Bytes(), 15, " ") + buf.WriteString("\n") + buf.WriteString(" Q:\n") + appendHexData(&buf, pub.Q.Bytes(), 15, " ") + buf.WriteString("\n") + buf.WriteString(" G:\n") + appendHexData(&buf, pub.G.Bytes(), 15, " ") + case *ecdsa.PublicKey: + data := elliptic.Marshal(pub.Curve, pub.X, pub.Y) + oid, ok := x509.OIDFromNamedCurve(pub.Curve) + if !ok { + return " " + } + oidname, bitlen := curveOIDToString(oid) + buf.WriteString(fmt.Sprintf(" Public Key: (%d bit)\n", bitlen)) + buf.WriteString(" pub:\n") + appendHexData(&buf, data, 15, " ") + buf.WriteString("\n") + buf.WriteString(fmt.Sprintf(" ASN1 OID: %s", oidname)) + default: + buf.WriteString(fmt.Sprintf("%v", pub)) + } + return buf.String() +} + +func commaAppend(buf *bytes.Buffer, s string) { + if buf.Len() > 0 { + buf.WriteString(", ") + } + buf.WriteString(s) +} + +func keyUsageToString(k x509.KeyUsage) string { + var buf bytes.Buffer + if k&x509.KeyUsageDigitalSignature != 0 { + commaAppend(&buf, "Digital Signature") + } + if k&x509.KeyUsageContentCommitment != 0 { + commaAppend(&buf, "Content Commitment") + } + if k&x509.KeyUsageKeyEncipherment != 0 { + commaAppend(&buf, "Key Encipherment") + } + if k&x509.KeyUsageDataEncipherment != 0 { + commaAppend(&buf, "Data Encipherment") + } + if k&x509.KeyUsageKeyAgreement != 0 { + commaAppend(&buf, "Key Agreement") + } + if k&x509.KeyUsageCertSign != 0 { + commaAppend(&buf, "Certificate Signing") + } + if k&x509.KeyUsageCRLSign != 0 { + commaAppend(&buf, "CRL Signing") + } + if k&x509.KeyUsageEncipherOnly != 0 { + commaAppend(&buf, "Encipher Only") + } + if k&x509.KeyUsageDecipherOnly != 0 { + commaAppend(&buf, "Decipher Only") + } + return buf.String() +} + +func extKeyUsageToString(u x509.ExtKeyUsage) string { + switch u { + case x509.ExtKeyUsageAny: + return "Any" + case x509.ExtKeyUsageServerAuth: + return "TLS Web server authentication" + case x509.ExtKeyUsageClientAuth: + return "TLS Web client authentication" + case x509.ExtKeyUsageCodeSigning: + return "Signing of executable code" + case x509.ExtKeyUsageEmailProtection: + return "Email protection" + case x509.ExtKeyUsageIPSECEndSystem: + return "IPSEC end system" + case x509.ExtKeyUsageIPSECTunnel: + return "IPSEC tunnel" + 
case x509.ExtKeyUsageIPSECUser: + return "IPSEC user" + case x509.ExtKeyUsageTimeStamping: + return "Time stamping" + case x509.ExtKeyUsageOCSPSigning: + return "OCSP signing" + case x509.ExtKeyUsageMicrosoftServerGatedCrypto: + return "Microsoft server gated cryptography" + case x509.ExtKeyUsageNetscapeServerGatedCrypto: + return "Netscape server gated cryptography" + case x509.ExtKeyUsageCertificateTransparency: + return "Certificate transparency" + default: + return "Unknown" + } +} + +func attributeOIDToString(oid asn1.ObjectIdentifier) string { // nolint:deadcode,unused + switch { + case oid.Equal(pkix.OIDCountry): + return "Country" + case oid.Equal(pkix.OIDOrganization): + return "Organization" + case oid.Equal(pkix.OIDOrganizationalUnit): + return "OrganizationalUnit" + case oid.Equal(pkix.OIDCommonName): + return "CommonName" + case oid.Equal(pkix.OIDSerialNumber): + return "SerialNumber" + case oid.Equal(pkix.OIDLocality): + return "Locality" + case oid.Equal(pkix.OIDProvince): + return "Province" + case oid.Equal(pkix.OIDStreetAddress): + return "StreetAddress" + case oid.Equal(pkix.OIDPostalCode): + return "PostalCode" + case oid.Equal(pkix.OIDPseudonym): + return "Pseudonym" + case oid.Equal(pkix.OIDTitle): + return "Title" + case oid.Equal(pkix.OIDDnQualifier): + return "DnQualifier" + case oid.Equal(pkix.OIDName): + return "Name" + case oid.Equal(pkix.OIDSurname): + return "Surname" + case oid.Equal(pkix.OIDGivenName): + return "GivenName" + case oid.Equal(pkix.OIDInitials): + return "Initials" + case oid.Equal(pkix.OIDGenerationQualifier): + return "GenerationQualifier" + default: + return oid.String() + } +} + +// NameToString creates a string description of a pkix.Name object. +func NameToString(name pkix.Name) string { + var result bytes.Buffer + addSingle := func(prefix, item string) { + if len(item) == 0 { + return + } + commaAppend(&result, prefix) + result.WriteString(item) + } + addList := func(prefix string, items []string) { + for _, item := range items { + addSingle(prefix, item) + } + } + addList("C=", name.Country) + addList("O=", name.Organization) + addList("OU=", name.OrganizationalUnit) + addList("L=", name.Locality) + addList("ST=", name.Province) + addList("streetAddress=", name.StreetAddress) + addList("postalCode=", name.PostalCode) + addSingle("serialNumber=", name.SerialNumber) + addSingle("CN=", name.CommonName) + for _, atv := range name.Names { + value, ok := atv.Value.(string) + if !ok { + continue + } + t := atv.Type + // All of the defined attribute OIDs are of the form 2.5.4.N, and OIDAttribute is + // the 2.5.4 prefix ('id-at' in RFC 5280). + if len(t) == 4 && t[0] == pkix.OIDAttribute[0] && t[1] == pkix.OIDAttribute[1] && t[2] == pkix.OIDAttribute[2] { + // OID is 'id-at N', so check the final value to figure out which attribute. 
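+			// Comparing the final arc alone is safe here because the
+			// 2.5.4 (id-at) prefix was already checked above.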
+ switch t[3] { + case pkix.OIDCommonName[3], pkix.OIDSerialNumber[3], pkix.OIDCountry[3], pkix.OIDLocality[3], pkix.OIDProvince[3], + pkix.OIDStreetAddress[3], pkix.OIDOrganization[3], pkix.OIDOrganizationalUnit[3], pkix.OIDPostalCode[3]: + continue // covered by explicit fields + case pkix.OIDPseudonym[3]: + addSingle("pseudonym=", value) + continue + case pkix.OIDTitle[3]: + addSingle("title=", value) + continue + case pkix.OIDDnQualifier[3]: + addSingle("dnQualifier=", value) + continue + case pkix.OIDName[3]: + addSingle("name=", value) + continue + case pkix.OIDSurname[3]: + addSingle("surname=", value) + continue + case pkix.OIDGivenName[3]: + addSingle("givenName=", value) + continue + case pkix.OIDInitials[3]: + addSingle("initials=", value) + continue + case pkix.OIDGenerationQualifier[3]: + addSingle("generationQualifier=", value) + continue + } + } + addSingle(t.String()+"=", value) + } + return result.String() +} + +// OtherNameToString creates a string description of an x509.OtherName object. +func OtherNameToString(other x509.OtherName) string { + return fmt.Sprintf("%v=%v", other.TypeID, hex.EncodeToString(other.Value.Bytes)) +} + +// GeneralNamesToString creates a string description of an x509.GeneralNames object. +func GeneralNamesToString(gname *x509.GeneralNames) string { + var buf bytes.Buffer + for _, name := range gname.DNSNames { + commaAppend(&buf, "DNS:"+name) + } + for _, email := range gname.EmailAddresses { + commaAppend(&buf, "email:"+email) + } + for _, name := range gname.DirectoryNames { + commaAppend(&buf, "DirName:"+NameToString(name)) + } + for _, uri := range gname.URIs { + commaAppend(&buf, "URI:"+uri) + } + for _, ip := range gname.IPNets { + if ip.Mask == nil { + commaAppend(&buf, "IP Address:"+ip.IP.String()) + } else { + commaAppend(&buf, "IP Address:"+ip.IP.String()+"/"+ip.Mask.String()) + } + } + for _, id := range gname.RegisteredIDs { + commaAppend(&buf, "Registered ID:"+id.String()) + } + for _, other := range gname.OtherNames { + commaAppend(&buf, "othername:"+OtherNameToString(other)) + } + return buf.String() +} + +// CertificateToString generates a string describing the given certificate. +// The output roughly resembles that from openssl x509 -text. 
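+//
+// A minimal sketch (certBytes is assumed to hold a DER-encoded certificate):
+//
+//	cert, err := x509.ParseCertificate(certBytes)
+//	if err == nil {
+//		fmt.Print(x509util.CertificateToString(cert))
+//	}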
+func CertificateToString(cert *x509.Certificate) string { + var result bytes.Buffer + result.WriteString("Certificate:\n") + result.WriteString(" Data:\n") + result.WriteString(fmt.Sprintf(" Version: %d (%#x)\n", cert.Version, cert.Version-1)) + result.WriteString(fmt.Sprintf(" Serial Number: %s (0x%s)\n", cert.SerialNumber.Text(10), cert.SerialNumber.Text(16))) + result.WriteString(fmt.Sprintf(" Signature Algorithm: %v\n", cert.SignatureAlgorithm)) + result.WriteString(fmt.Sprintf(" Issuer: %v\n", NameToString(cert.Issuer))) + result.WriteString(" Validity:\n") + result.WriteString(fmt.Sprintf(" Not Before: %v\n", cert.NotBefore)) + result.WriteString(fmt.Sprintf(" Not After : %v\n", cert.NotAfter)) + result.WriteString(fmt.Sprintf(" Subject: %v\n", NameToString(cert.Subject))) + result.WriteString(" Subject Public Key Info:\n") + result.WriteString(fmt.Sprintf(" Public Key Algorithm: %v\n", publicKeyAlgorithmToString(cert.PublicKeyAlgorithm))) + result.WriteString(fmt.Sprintf("%v\n", publicKeyToString(cert.PublicKeyAlgorithm, cert.PublicKey))) + + if len(cert.Extensions) > 0 { + result.WriteString(" X509v3 extensions:\n") + } + // First display the extensions that are already cracked out + showAuthKeyID(&result, cert) + showSubjectKeyID(&result, cert) + showKeyUsage(&result, cert) + showExtendedKeyUsage(&result, cert) + showBasicConstraints(&result, cert) + showSubjectAltName(&result, cert) + showNameConstraints(&result, cert) + showCertPolicies(&result, cert) + showCRLDPs(&result, cert) + showAuthInfoAccess(&result, cert) + showSubjectInfoAccess(&result, cert) + showRPKIAddressRanges(&result, cert) + showRPKIASIdentifiers(&result, cert) + showCTPoison(&result, cert) + showCTSCT(&result, cert) + showCTLogSTHInfo(&result, cert) + + showUnhandledExtensions(&result, cert) + showSignature(&result, cert) + + return result.String() +} + +func showCritical(result *bytes.Buffer, critical bool) { + if critical { + result.WriteString(" critical") + } + result.WriteString("\n") +} + +func showAuthKeyID(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionAuthorityKeyId, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Authority Key Identifier:") + showCritical(result, critical) + result.WriteString(fmt.Sprintf(" keyid:%v\n", hex.EncodeToString(cert.AuthorityKeyId))) + } +} + +func showSubjectKeyID(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionSubjectKeyId, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Subject Key Identifier:") + showCritical(result, critical) + result.WriteString(fmt.Sprintf(" keyid:%v\n", hex.EncodeToString(cert.SubjectKeyId))) + } +} + +func showKeyUsage(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionKeyUsage, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Key Usage:") + showCritical(result, critical) + result.WriteString(fmt.Sprintf(" %v\n", keyUsageToString(cert.KeyUsage))) + } +} + +func showExtendedKeyUsage(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionExtendedKeyUsage, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Extended Key Usage:") + showCritical(result, critical) + var usages bytes.Buffer + for _, usage := range cert.ExtKeyUsage { + commaAppend(&usages, extKeyUsageToString(usage)) + } + for _, oid := range cert.UnknownExtKeyUsage { + commaAppend(&usages, oid.String()) + } + 
result.WriteString(fmt.Sprintf(" %v\n", usages.String())) + } +} + +func showBasicConstraints(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionBasicConstraints, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Basic Constraints:") + showCritical(result, critical) + result.WriteString(fmt.Sprintf(" CA:%t", cert.IsCA)) + if cert.MaxPathLen > 0 || cert.MaxPathLenZero { + result.WriteString(fmt.Sprintf(", pathlen:%d", cert.MaxPathLen)) + } + result.WriteString("\n") + } +} + +func showSubjectAltName(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionSubjectAltName, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Subject Alternative Name:") + showCritical(result, critical) + var buf bytes.Buffer + for _, name := range cert.DNSNames { + commaAppend(&buf, "DNS:"+name) + } + for _, email := range cert.EmailAddresses { + commaAppend(&buf, "email:"+email) + } + for _, ip := range cert.IPAddresses { + commaAppend(&buf, "IP Address:"+ip.String()) + } + + result.WriteString(fmt.Sprintf(" %v\n", buf.String())) + // TODO(drysdale): include other name forms + } +} + +func showNameConstraints(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionNameConstraints, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Name Constraints:") + showCritical(result, critical) + if len(cert.PermittedDNSDomains) > 0 { + result.WriteString(" Permitted:\n") + var buf bytes.Buffer + for _, name := range cert.PermittedDNSDomains { + commaAppend(&buf, "DNS:"+name) + } + result.WriteString(fmt.Sprintf(" %v\n", buf.String())) + } + // TODO(drysdale): include other name forms + } + +} + +func showCertPolicies(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionCertificatePolicies, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 Certificate Policies:") + showCritical(result, critical) + for _, oid := range cert.PolicyIdentifiers { + result.WriteString(fmt.Sprintf(" Policy: %v\n", oid.String())) + // TODO(drysdale): Display any qualifiers associated with the policy + } + } + +} + +func showCRLDPs(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionCRLDistributionPoints, cert.Extensions) + if count > 0 { + result.WriteString(" X509v3 CRL Distribution Points:") + showCritical(result, critical) + result.WriteString(" Full Name:\n") + var buf bytes.Buffer + for _, pt := range cert.CRLDistributionPoints { + commaAppend(&buf, "URI:"+pt) + } + result.WriteString(fmt.Sprintf(" %v\n", buf.String())) + // TODO(drysdale): Display other GeneralNames types, plus issuer/reasons/relative-name + } + +} + +func showAuthInfoAccess(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionAuthorityInfoAccess, cert.Extensions) + if count > 0 { + result.WriteString(" Authority Information Access:") + showCritical(result, critical) + var issuerBuf bytes.Buffer + for _, issuer := range cert.IssuingCertificateURL { + commaAppend(&issuerBuf, "URI:"+issuer) + } + if issuerBuf.Len() > 0 { + result.WriteString(fmt.Sprintf(" CA Issuers - %v\n", issuerBuf.String())) + } + var ocspBuf bytes.Buffer + for _, ocsp := range cert.OCSPServer { + commaAppend(&ocspBuf, "URI:"+ocsp) + } + if ocspBuf.Len() > 0 { + result.WriteString(fmt.Sprintf(" OCSP - %v\n", ocspBuf.String())) + } + // TODO(drysdale): Display other 
GeneralNames types + } +} + +func showSubjectInfoAccess(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionSubjectInfoAccess, cert.Extensions) + if count > 0 { + result.WriteString(" Subject Information Access:") + showCritical(result, critical) + var tsBuf bytes.Buffer + for _, ts := range cert.SubjectTimestamps { + commaAppend(&tsBuf, "URI:"+ts) + } + if tsBuf.Len() > 0 { + result.WriteString(fmt.Sprintf(" AD Time Stamping - %v\n", tsBuf.String())) + } + var repoBuf bytes.Buffer + for _, repo := range cert.SubjectCARepositories { + commaAppend(&repoBuf, "URI:"+repo) + } + if repoBuf.Len() > 0 { + result.WriteString(fmt.Sprintf(" CA repository - %v\n", repoBuf.String())) + } + } +} + +func showAddressRange(prefix x509.IPAddressPrefix, afi uint16) string { + switch afi { + case x509.IPv4AddressFamilyIndicator, x509.IPv6AddressFamilyIndicator: + size := 4 + if afi == x509.IPv6AddressFamilyIndicator { + size = 16 + } + ip := make([]byte, size) + copy(ip, prefix.Bytes) + addr := net.IPNet{IP: ip, Mask: net.CIDRMask(prefix.BitLength, 8*size)} + return addr.String() + default: + return fmt.Sprintf("%x/%d", prefix.Bytes, prefix.BitLength) + } + +} + +func showRPKIAddressRanges(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionIPPrefixList, cert.Extensions) + if count > 0 { + result.WriteString(" sbgp-ipAddrBlock:") + showCritical(result, critical) + for _, blocks := range cert.RPKIAddressRanges { + afi := blocks.AFI + switch afi { + case x509.IPv4AddressFamilyIndicator: + result.WriteString(" IPv4") + case x509.IPv6AddressFamilyIndicator: + result.WriteString(" IPv6") + default: + result.WriteString(fmt.Sprintf(" %d", afi)) + } + if blocks.SAFI != 0 { + result.WriteString(fmt.Sprintf(" SAFI=%d", blocks.SAFI)) + } + result.WriteString(":") + if blocks.InheritFromIssuer { + result.WriteString(" inherit\n") + continue + } + result.WriteString("\n") + for _, prefix := range blocks.AddressPrefixes { + result.WriteString(fmt.Sprintf(" %s\n", showAddressRange(prefix, afi))) + } + for _, ipRange := range blocks.AddressRanges { + result.WriteString(fmt.Sprintf(" [%s, %s]\n", showAddressRange(ipRange.Min, afi), showAddressRange(ipRange.Max, afi))) + } + } + } +} + +func showASIDs(result *bytes.Buffer, asids *x509.ASIdentifiers, label string) { + if asids == nil { + return + } + result.WriteString(fmt.Sprintf(" %s:\n", label)) + if asids.InheritFromIssuer { + result.WriteString(" inherit\n") + return + } + for _, id := range asids.ASIDs { + result.WriteString(fmt.Sprintf(" %d\n", id)) + } + for _, idRange := range asids.ASIDRanges { + result.WriteString(fmt.Sprintf(" %d-%d\n", idRange.Min, idRange.Max)) + } +} + +func showRPKIASIdentifiers(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionASList, cert.Extensions) + if count > 0 { + result.WriteString(" sbgp-autonomousSysNum:") + showCritical(result, critical) + showASIDs(result, cert.RPKIASNumbers, "Autonomous System Numbers") + showASIDs(result, cert.RPKIRoutingDomainIDs, "Routing Domain Identifiers") + } +} +func showCTPoison(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509.OIDExtensionCTPoison, cert.Extensions) + if count > 0 { + result.WriteString(" RFC6962 Pre-Certificate Poison:") + showCritical(result, critical) + result.WriteString(" .....\n") + } +} + +func showCTSCT(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := 
OIDInExtensions(x509.OIDExtensionCTSCT, cert.Extensions) + if count > 0 { + result.WriteString(" RFC6962 Certificate Transparency SCT:") + showCritical(result, critical) + for i, sctData := range cert.SCTList.SCTList { + result.WriteString(fmt.Sprintf(" SCT [%d]:\n", i)) + var sct ct.SignedCertificateTimestamp + _, err := tls.Unmarshal(sctData.Val, &sct) + if err != nil { + appendHexData(result, sctData.Val, 16, " ") + result.WriteString("\n") + continue + } + result.WriteString(fmt.Sprintf(" Version: %d\n", sct.SCTVersion)) + result.WriteString(fmt.Sprintf(" LogID: %s\n", base64.StdEncoding.EncodeToString(sct.LogID.KeyID[:]))) + result.WriteString(fmt.Sprintf(" Timestamp: %d\n", sct.Timestamp)) + result.WriteString(fmt.Sprintf(" Signature: %s\n", sct.Signature.Algorithm)) + result.WriteString(" Signature:\n") + appendHexData(result, sct.Signature.Signature, 16, " ") + result.WriteString("\n") + } + } +} + +func showCTLogSTHInfo(result *bytes.Buffer, cert *x509.Certificate) { + count, critical := OIDInExtensions(x509ext.OIDExtensionCTSTH, cert.Extensions) + if count > 0 { + result.WriteString(" Certificate Transparency STH:") + showCritical(result, critical) + sthInfo, err := x509ext.LogSTHInfoFromCert(cert) + if err != nil { + result.WriteString(" Failed to decode STH:\n") + return + } + result.WriteString(fmt.Sprintf(" LogURL: %s\n", string(sthInfo.LogURL))) + result.WriteString(fmt.Sprintf(" Version: %d\n", sthInfo.Version)) + result.WriteString(fmt.Sprintf(" TreeSize: %d\n", sthInfo.TreeSize)) + result.WriteString(fmt.Sprintf(" Timestamp: %d\n", sthInfo.Timestamp)) + result.WriteString(" RootHash:\n") + appendHexData(result, sthInfo.SHA256RootHash[:], 16, " ") + result.WriteString("\n") + result.WriteString(fmt.Sprintf(" TreeHeadSignature: %s\n", sthInfo.TreeHeadSignature.Algorithm)) + appendHexData(result, sthInfo.TreeHeadSignature.Signature, 16, " ") + result.WriteString("\n") + } +} + +func showUnhandledExtensions(result *bytes.Buffer, cert *x509.Certificate) { + for _, ext := range cert.Extensions { + // Skip extensions that are already cracked out + if oidAlreadyPrinted(ext.Id) { + continue + } + result.WriteString(fmt.Sprintf(" %v:", ext.Id)) + showCritical(result, ext.Critical) + appendHexData(result, ext.Value, 16, " ") + result.WriteString("\n") + } +} + +func showSignature(result *bytes.Buffer, cert *x509.Certificate) { + result.WriteString(fmt.Sprintf(" Signature Algorithm: %v\n", cert.SignatureAlgorithm)) + appendHexData(result, cert.Signature, 18, " ") + result.WriteString("\n") +} + +// TODO(drysdale): remove this once all standard OIDs are parsed and printed. 
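+// oidAlreadyPrinted reports whether the extension identified by oid was
+// already rendered by one of the show* helpers above, so that
+// showUnhandledExtensions can skip it.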
+func oidAlreadyPrinted(oid asn1.ObjectIdentifier) bool { + if oid.Equal(x509.OIDExtensionSubjectKeyId) || + oid.Equal(x509.OIDExtensionKeyUsage) || + oid.Equal(x509.OIDExtensionExtendedKeyUsage) || + oid.Equal(x509.OIDExtensionAuthorityKeyId) || + oid.Equal(x509.OIDExtensionBasicConstraints) || + oid.Equal(x509.OIDExtensionSubjectAltName) || + oid.Equal(x509.OIDExtensionCertificatePolicies) || + oid.Equal(x509.OIDExtensionNameConstraints) || + oid.Equal(x509.OIDExtensionCRLDistributionPoints) || + oid.Equal(x509.OIDExtensionAuthorityInfoAccess) || + oid.Equal(x509.OIDExtensionSubjectInfoAccess) || + oid.Equal(x509.OIDExtensionIPPrefixList) || + oid.Equal(x509.OIDExtensionASList) || + oid.Equal(x509.OIDExtensionCTPoison) || + oid.Equal(x509.OIDExtensionCTSCT) || + oid.Equal(x509ext.OIDExtensionCTSTH) { + return true + } + return false +} + +// CertificateFromPEM takes a certificate in PEM format and returns the +// corresponding x509.Certificate object. +func CertificateFromPEM(pemBytes []byte) (*x509.Certificate, error) { + block, rest := pem.Decode(pemBytes) + if len(rest) != 0 { + return nil, errors.New("trailing data found after PEM block") + } + if block == nil { + return nil, errors.New("PEM block is nil") + } + if block.Type != "CERTIFICATE" { + return nil, errors.New("PEM block is not a CERTIFICATE") + } + return x509.ParseCertificate(block.Bytes) +} + +// CertificatesFromPEM parses one or more certificates from the given PEM data. +// The PEM certificates must be concatenated. This function can be used for +// parsing PEM-formatted certificate chains, but does not verify that the +// resulting chain is a valid certificate chain. +func CertificatesFromPEM(pemBytes []byte) ([]*x509.Certificate, error) { + var chain []*x509.Certificate + for { + var block *pem.Block + block, pemBytes = pem.Decode(pemBytes) + if block == nil { + return chain, nil + } + if block.Type != "CERTIFICATE" { + return nil, fmt.Errorf("PEM block is not a CERTIFICATE") + } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, errors.New("failed to parse certificate") + } + chain = append(chain, cert) + } +} + +// ParseSCTsFromSCTList parses each of the SCTs contained within an SCT list. +func ParseSCTsFromSCTList(sctList *x509.SignedCertificateTimestampList) ([]*ct.SignedCertificateTimestamp, error) { + var scts []*ct.SignedCertificateTimestamp + for i, data := range sctList.SCTList { + sct, err := ExtractSCT(&data) + if err != nil { + return nil, fmt.Errorf("error extracting SCT number %d: %s", i, err) + } + scts = append(scts, sct) + } + return scts, nil +} + +// ExtractSCT deserializes an SCT from a TLS-encoded SCT. +func ExtractSCT(sctData *x509.SerializedSCT) (*ct.SignedCertificateTimestamp, error) { + if sctData == nil { + return nil, errors.New("SCT is nil") + } + var sct ct.SignedCertificateTimestamp + if rest, err := tls.Unmarshal(sctData.Val, &sct); err != nil { + return nil, fmt.Errorf("error parsing SCT: %s", err) + } else if len(rest) > 0 { + return nil, fmt.Errorf("extra data (%d bytes) after serialized SCT", len(rest)) + } + return &sct, nil +} + +// MarshalSCTsIntoSCTList serializes SCTs into SCT list. 
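+// It is the inverse of ParseSCTsFromSCTList: each SCT is TLS-marshaled back
+// into a SerializedSCT entry, so a parse/marshal round trip should preserve
+// the encoded bytes.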
+func MarshalSCTsIntoSCTList(scts []*ct.SignedCertificateTimestamp) (*x509.SignedCertificateTimestampList, error) { + var sctList x509.SignedCertificateTimestampList + sctList.SCTList = []x509.SerializedSCT{} + for i, sct := range scts { + if sct == nil { + return nil, fmt.Errorf("SCT number %d is nil", i) + } + encd, err := tls.Marshal(*sct) + if err != nil { + return nil, fmt.Errorf("error serializing SCT number %d: %s", i, err) + } + sctData := x509.SerializedSCT{Val: encd} + sctList.SCTList = append(sctList.SCTList, sctData) + } + return &sctList, nil +} + +var pemCertificatePrefix = []byte("-----BEGIN CERTIFICATE") + +// ParseSCTsFromCertificate parses any SCTs that are embedded in the +// certificate provided. The certificate bytes provided can be either DER or +// PEM, provided the PEM data starts with the PEM block marker (i.e. has no +// leading text). +func ParseSCTsFromCertificate(certBytes []byte) ([]*ct.SignedCertificateTimestamp, error) { + var cert *x509.Certificate + var err error + if bytes.HasPrefix(certBytes, pemCertificatePrefix) { + cert, err = CertificateFromPEM(certBytes) + } else { + cert, err = x509.ParseCertificate(certBytes) + } + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + return ParseSCTsFromSCTList(&cert.SCTList) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go index 960c93b5f41..b62d8482691 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go @@ -27,10 +27,11 @@ import ( // docker_version and os.version are not part of the spec but included // for backwards compatibility. type ConfigFile struct { - Architecture string `json:"architecture"` - Author string `json:"author,omitempty"` - Container string `json:"container,omitempty"` - Created Time `json:"created,omitempty"` + Architecture string `json:"architecture"` + Author string `json:"author,omitempty"` + Container string `json:"container,omitempty"` + Created Time `json:"created,omitempty"` + // Deprecated: This field is deprecated and will be removed in the next release. DockerVersion string `json:"docker_version,omitempty"` History []History `json:"history,omitempty"` OS string `json:"os"` diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE new file mode 100644 index 00000000000..364516251b9 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015, Gengo, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Gengo, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel new file mode 100644 index 00000000000..b8fbb2b77c4 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "httprule", + srcs = [ + "compile.go", + "parse.go", + "types.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule", + deps = ["//utilities"], +) + +go_test( + name = "httprule_test", + size = "small", + srcs = [ + "compile_test.go", + "parse_test.go", + "types_test.go", + ], + embed = [":httprule"], + deps = [ + "//utilities", + "@org_golang_google_grpc//grpclog", + ], +) + +alias( + name = "go_default_library", + actual = ":httprule", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go new file mode 100644 index 00000000000..3cd9372959d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go @@ -0,0 +1,121 @@ +package httprule + +import ( + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" +) + +const ( + opcodeVersion = 1 +) + +// Template is a compiled representation of path templates. +type Template struct { + // Version is the version number of the format. + Version int + // OpCodes is a sequence of operations. + OpCodes []int + // Pool is a constant pool + Pool []string + // Verb is a VERB part in the template. + Verb string + // Fields is a list of field paths bound in this template. + Fields []string + // Original template (example: /v1/a_bit_of_everything) + Template string +} + +// Compiler compiles utilities representation of path templates into marshallable operations. +// They can be unmarshalled by runtime.NewPattern. +type Compiler interface { + Compile() Template +} + +type op struct { + // code is the opcode of the operation + code utilities.OpCode + + // str is a string operand of the code. + // num is ignored if str is not empty. + str string + + // num is a numeric operand of the code. 
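+	// In this file only OpConcatN sets num (the number of path segments
+	// to concatenate); OpPush and OpPushM leave it zero.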
+ num int +} + +func (w wildcard) compile() []op { + return []op{ + {code: utilities.OpPush}, + } +} + +func (w deepWildcard) compile() []op { + return []op{ + {code: utilities.OpPushM}, + } +} + +func (l literal) compile() []op { + return []op{ + { + code: utilities.OpLitPush, + str: string(l), + }, + } +} + +func (v variable) compile() []op { + var ops []op + for _, s := range v.segments { + ops = append(ops, s.compile()...) + } + ops = append(ops, op{ + code: utilities.OpConcatN, + num: len(v.segments), + }, op{ + code: utilities.OpCapture, + str: v.path, + }) + + return ops +} + +func (t template) Compile() Template { + var rawOps []op + for _, s := range t.segments { + rawOps = append(rawOps, s.compile()...) + } + + var ( + ops []int + pool []string + fields []string + ) + consts := make(map[string]int) + for _, op := range rawOps { + ops = append(ops, int(op.code)) + if op.str == "" { + ops = append(ops, op.num) + } else { + // eof segment literal represents the "/" path pattern + if op.str == eof { + op.str = "" + } + if _, ok := consts[op.str]; !ok { + consts[op.str] = len(pool) + pool = append(pool, op.str) + } + ops = append(ops, consts[op.str]) + } + if op.code == utilities.OpCapture { + fields = append(fields, op.str) + } + } + return Template{ + Version: opcodeVersion, + OpCodes: ops, + Pool: pool, + Verb: t.verb, + Fields: fields, + Template: t.template, + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go new file mode 100644 index 00000000000..c056bd3058a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go @@ -0,0 +1,11 @@ +//go:build gofuzz +// +build gofuzz + +package httprule + +func Fuzz(data []byte) int { + if _, err := Parse(string(data)); err != nil { + return 0 + } + return 0 +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go new file mode 100644 index 00000000000..65ffcf5cf87 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go @@ -0,0 +1,368 @@ +package httprule + +import ( + "errors" + "fmt" + "strings" +) + +// InvalidTemplateError indicates that the path template is not valid. 
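[Editor's note: an aside before the parser types that follow. Parse is this package's entry point, and InvalidTemplateError is how it reports malformed templates. A minimal in-package sketch; the test name is hypothetical, and httprule is internal to grpc-gateway, so external callers go through the runtime package.]

```go
package httprule

import "testing"

// TestParseRejectsMissingSlash exercises the documented error path:
// templates must begin with "/", otherwise Parse returns InvalidTemplateError.
func TestParseRejectsMissingSlash(t *testing.T) {
	_, err := Parse("v1/books")
	if err == nil {
		t.Fatal("expected an error for a template with no leading /")
	}
	// InvalidTemplateError.Error() renders as "<msg>: <tmpl>".
	if got, want := err.Error(), "no leading /: v1/books"; got != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}
```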
+type InvalidTemplateError struct { + tmpl string + msg string +} + +func (e InvalidTemplateError) Error() string { + return fmt.Sprintf("%s: %s", e.msg, e.tmpl) +} + +// Parse parses the string representation of path template +func Parse(tmpl string) (Compiler, error) { + if !strings.HasPrefix(tmpl, "/") { + return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"} + } + tokens, verb := tokenize(tmpl[1:]) + + p := parser{tokens: tokens} + segs, err := p.topLevelSegments() + if err != nil { + return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()} + } + + return template{ + segments: segs, + verb: verb, + template: tmpl, + }, nil +} + +func tokenize(path string) (tokens []string, verb string) { + if path == "" { + return []string{eof}, "" + } + + const ( + init = iota + field + nested + ) + st := init + for path != "" { + var idx int + switch st { + case init: + idx = strings.IndexAny(path, "/{") + case field: + idx = strings.IndexAny(path, ".=}") + case nested: + idx = strings.IndexAny(path, "/}") + } + if idx < 0 { + tokens = append(tokens, path) + break + } + switch r := path[idx]; r { + case '/', '.': + case '{': + st = field + case '=': + st = nested + case '}': + st = init + } + if idx == 0 { + tokens = append(tokens, path[idx:idx+1]) + } else { + tokens = append(tokens, path[:idx], path[idx:idx+1]) + } + path = path[idx+1:] + } + + l := len(tokens) + // See + // https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ; + // although normal and backwards-compat logic here is to use the last index + // of a colon, if the final segment is a variable followed by a colon, the + // part following the colon must be a verb. Hence if the previous token is + // an end var marker, we switch the index we're looking for to Index instead + // of LastIndex, so that we correctly grab the remaining part of the path as + // the verb. + var penultimateTokenIsEndVar bool + switch l { + case 0, 1: + // Not enough to be variable so skip this logic and don't result in an + // invalid index + default: + penultimateTokenIsEndVar = tokens[l-2] == "}" + } + t := tokens[l-1] + var idx int + if penultimateTokenIsEndVar { + idx = strings.Index(t, ":") + } else { + idx = strings.LastIndex(t, ":") + } + if idx == 0 { + tokens, verb = tokens[:l-1], t[1:] + } else if idx > 0 { + tokens[l-1], verb = t[:idx], t[idx+1:] + } + tokens = append(tokens, eof) + return tokens, verb +} + +// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto. +type parser struct { + tokens []string + accepted []string +} + +// topLevelSegments is the target of this parser. 
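[Editor's note: the colon handling described in the tokenize comment above is the subtlest part of this file. The verb is normally split at the last colon, but when the final segment closes a variable (the previous token is "}") the first colon wins, so later colons stay inside the verb. A hedged in-package sketch, since tokenize and eof are unexported:]

```go
package httprule

import (
	"reflect"
	"testing"
)

func TestTokenizeVerbSplitting(t *testing.T) {
	// Literal final segment: the verb is split at the LAST colon.
	tokens, verb := tokenize("v1/books:create")
	want := []string{"v1", "/", "books", eof}
	if !reflect.DeepEqual(tokens, want) || verb != "create" {
		t.Fatalf("got tokens %v, verb %q", tokens, verb)
	}

	// Final segment closes a variable: the verb is split at the FIRST
	// colon, so everything after it, including further colons, is kept.
	if _, verb = tokenize("v1/{name=books/*}:get:details"); verb != "get:details" {
		t.Fatalf("got verb %q, want %q", verb, "get:details")
	}
}
```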
+func (p *parser) topLevelSegments() ([]segment, error) { + if _, err := p.accept(typeEOF); err == nil { + p.tokens = p.tokens[:0] + return []segment{literal(eof)}, nil + } + segs, err := p.segments() + if err != nil { + return nil, err + } + if _, err := p.accept(typeEOF); err != nil { + return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, "")) + } + return segs, nil +} + +func (p *parser) segments() ([]segment, error) { + s, err := p.segment() + if err != nil { + return nil, err + } + + segs := []segment{s} + for { + if _, err := p.accept("/"); err != nil { + return segs, nil + } + s, err := p.segment() + if err != nil { + return segs, err + } + segs = append(segs, s) + } +} + +func (p *parser) segment() (segment, error) { + if _, err := p.accept("*"); err == nil { + return wildcard{}, nil + } + if _, err := p.accept("**"); err == nil { + return deepWildcard{}, nil + } + if l, err := p.literal(); err == nil { + return l, nil + } + + v, err := p.variable() + if err != nil { + return nil, fmt.Errorf("segment neither wildcards, literal or variable: %w", err) + } + return v, nil +} + +func (p *parser) literal() (segment, error) { + lit, err := p.accept(typeLiteral) + if err != nil { + return nil, err + } + return literal(lit), nil +} + +func (p *parser) variable() (segment, error) { + if _, err := p.accept("{"); err != nil { + return nil, err + } + + path, err := p.fieldPath() + if err != nil { + return nil, err + } + + var segs []segment + if _, err := p.accept("="); err == nil { + segs, err = p.segments() + if err != nil { + return nil, fmt.Errorf("invalid segment in variable %q: %w", path, err) + } + } else { + segs = []segment{wildcard{}} + } + + if _, err := p.accept("}"); err != nil { + return nil, fmt.Errorf("unterminated variable segment: %s", path) + } + return variable{ + path: path, + segments: segs, + }, nil +} + +func (p *parser) fieldPath() (string, error) { + c, err := p.accept(typeIdent) + if err != nil { + return "", err + } + components := []string{c} + for { + if _, err := p.accept("."); err != nil { + return strings.Join(components, "."), nil + } + c, err := p.accept(typeIdent) + if err != nil { + return "", fmt.Errorf("invalid field path component: %w", err) + } + components = append(components, c) + } +} + +// A termType is a type of terminal symbols. +type termType string + +// These constants define some of valid values of termType. +// They improve readability of parse functions. +// +// You can also use "/", "*", "**", "." or "=" as valid values. +const ( + typeIdent = termType("ident") + typeLiteral = termType("literal") + typeEOF = termType("$") +) + +// eof is the terminal symbol which always appears at the end of token sequence. +const eof = "\u0000" + +// accept tries to accept a token in "p". +// This function consumes a token and returns it if it matches to the specified "term". +// If it doesn't match, the function does not consume any tokens and return an error. 
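[Editor's note: tying Parse back to the Compile step earlier in this diff. A variable compiles to the pushes for its nested segments followed by OpConcatN and OpCapture, and each distinct string is pooled once. A sketch of the round trip; the commented values follow the compile logic above and are illustrative, not normative.]

```go
package httprule

import "fmt"

func ExampleParseAndCompile() {
	c, err := Parse("/v1/{name=books/*}:get")
	if err != nil {
		panic(err)
	}
	tmpl := c.Compile()
	// {name=books/*} becomes OpLitPush("books"), OpPush, OpConcatN(2),
	// OpCapture("name"); "v1" is a plain OpLitPush. Pool entries are
	// deduplicated in first-seen order.
	fmt.Println(tmpl.Verb)   // get
	fmt.Println(tmpl.Fields) // [name]
	fmt.Println(tmpl.Pool)   // [v1 books name]
}
```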
+func (p *parser) accept(term termType) (string, error) { + t := p.tokens[0] + switch term { + case "/", "*", "**", ".", "=", "{", "}": + if t != string(term) && t != "/" { + return "", fmt.Errorf("expected %q but got %q", term, t) + } + case typeEOF: + if t != eof { + return "", fmt.Errorf("expected EOF but got %q", t) + } + case typeIdent: + if err := expectIdent(t); err != nil { + return "", err + } + case typeLiteral: + if err := expectPChars(t); err != nil { + return "", err + } + default: + return "", fmt.Errorf("unknown termType %q", term) + } + p.tokens = p.tokens[1:] + p.accepted = append(p.accepted, t) + return t, nil +} + +// expectPChars determines if "t" consists of only pchars defined in RFC3986. +// +// https://www.ietf.org/rfc/rfc3986.txt, P.49 +// +// pchar = unreserved / pct-encoded / sub-delims / ":" / "@" +// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" +// sub-delims = "!" / "$" / "&" / "'" / "(" / ")" +// / "*" / "+" / "," / ";" / "=" +// pct-encoded = "%" HEXDIG HEXDIG +func expectPChars(t string) error { + const ( + init = iota + pct1 + pct2 + ) + st := init + for _, r := range t { + if st != init { + if !isHexDigit(r) { + return fmt.Errorf("invalid hexdigit: %c(%U)", r, r) + } + switch st { + case pct1: + st = pct2 + case pct2: + st = init + } + continue + } + + // unreserved + switch { + case 'A' <= r && r <= 'Z': + continue + case 'a' <= r && r <= 'z': + continue + case '0' <= r && r <= '9': + continue + } + switch r { + case '-', '.', '_', '~': + // unreserved + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': + // sub-delims + case ':', '@': + // rest of pchar + case '%': + // pct-encoded + st = pct1 + default: + return fmt.Errorf("invalid character in path segment: %q(%U)", r, r) + } + } + if st != init { + return fmt.Errorf("invalid percent-encoding in %q", t) + } + return nil +} + +// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*). 
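[Editor's note: a few concrete cases for the two validators, sketched as an in-package test. The test name is hypothetical; both functions are unexported, and expectIdent's body follows just below.]

```go
package httprule

import "testing"

func TestSegmentValidators(t *testing.T) {
	// expectPChars: a pct-encoded byte needs exactly two hex digits after '%'.
	if err := expectPChars("a%2Fb"); err != nil {
		t.Fatalf("valid pchar string rejected: %v", err)
	}
	if err := expectPChars("bad%2"); err == nil {
		t.Fatal("truncated percent-encoding accepted")
	}

	// expectIdent: [[:alpha:]_][[:alnum:]_]*, per the comment above.
	if err := expectIdent("_book1"); err != nil {
		t.Fatalf("valid identifier rejected: %v", err)
	}
	if err := expectIdent("1book"); err == nil {
		t.Fatal("identifier starting with a digit accepted")
	}
}
```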
+func expectIdent(ident string) error { + if ident == "" { + return errors.New("empty identifier") + } + for pos, r := range ident { + switch { + case '0' <= r && r <= '9': + if pos == 0 { + return fmt.Errorf("identifier starting with digit: %s", ident) + } + continue + case 'A' <= r && r <= 'Z': + continue + case 'a' <= r && r <= 'z': + continue + case r == '_': + continue + default: + return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident) + } + } + return nil +} + +func isHexDigit(r rune) bool { + switch { + case '0' <= r && r <= '9': + return true + case 'A' <= r && r <= 'F': + return true + case 'a' <= r && r <= 'f': + return true + } + return false +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go new file mode 100644 index 00000000000..5a814a0004c --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go @@ -0,0 +1,60 @@ +package httprule + +import ( + "fmt" + "strings" +) + +type template struct { + segments []segment + verb string + template string +} + +type segment interface { + fmt.Stringer + compile() (ops []op) +} + +type wildcard struct{} + +type deepWildcard struct{} + +type literal string + +type variable struct { + path string + segments []segment +} + +func (wildcard) String() string { + return "*" +} + +func (deepWildcard) String() string { + return "**" +} + +func (l literal) String() string { + return string(l) +} + +func (v variable) String() string { + var segs []string + for _, s := range v.segments { + segs = append(segs, s.String()) + } + return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/")) +} + +func (t template) String() string { + var segs []string + for _, s := range t.segments { + segs = append(segs, s.String()) + } + str := strings.Join(segs, "/") + if t.verb != "" { + str = fmt.Sprintf("%s:%s", str, t.verb) + } + return "/" + str +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel new file mode 100644 index 00000000000..d71991e6e8d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel @@ -0,0 +1,44 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") + +package(default_visibility = ["//visibility:public"]) + +filegroup( + name = "options_proto_files", + srcs = [ + "annotations.proto", + "openapiv2.proto", + ], +) + +go_library( + name = "options", + embed = [":options_go_proto"], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options", +) + +proto_library( + name = "options_proto", + srcs = [ + "annotations.proto", + "openapiv2.proto", + ], + deps = [ + "@com_google_protobuf//:descriptor_proto", + "@com_google_protobuf//:struct_proto", + ], +) + +go_proto_library( + name = "options_go_proto", + compilers = ["//:go_apiv2"], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options", + proto = ":options_proto", +) + +alias( + name = "go_default_library", + actual = ":options", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go 
b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go new file mode 100644 index 00000000000..738c9754a61 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go @@ -0,0 +1,269 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.0 +// protoc (unknown) +// source: protoc-gen-openapiv2/options/annotations.proto + +//go:build !protoopaque + +package options + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*Swagger)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger", + Tag: "bytes,1042,opt,name=openapiv2_swagger", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation", + Tag: "bytes,1042,opt,name=openapiv2_operation", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema", + Tag: "bytes,1042,opt,name=openapiv2_schema", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*EnumSchema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum", + Tag: "bytes,1042,opt,name=openapiv2_enum", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*Tag)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag", + Tag: "bytes,1042,opt,name=openapiv2_tag", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*JSONSchema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field", + Tag: "bytes,1042,opt,name=openapiv2_field", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Swagger openapiv2_swagger = 1042; + E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MethodOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. 
It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Operation openapiv2_operation = 1042; + E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Schema openapiv2_schema = 1042; + E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2] +) + +// Extension fields to descriptorpb.EnumOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.EnumSchema openapiv2_enum = 1042; + E_Openapiv2Enum = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3] +) + +// Extension fields to descriptorpb.ServiceOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042; + E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4] +) + +// Extension fields to descriptorpb.FieldOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. 
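[Editor's note: since all six annotations share field number 1042, consumers distinguish them by the extended options type. A hedged sketch of reading the file-level Swagger annotation with google.golang.org/protobuf/proto; readSwagger is a hypothetical helper, not part of this package.]

```go
package main

import (
	"fmt"

	options "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// readSwagger returns the openapiv2_swagger annotation from a file's
// options, or nil when the extension is absent.
func readSwagger(fd *descriptorpb.FileDescriptorProto) *options.Swagger {
	opts := fd.GetOptions()
	if opts == nil || !proto.HasExtension(opts, options.E_Openapiv2Swagger) {
		return nil
	}
	return proto.GetExtension(opts, options.E_Openapiv2Swagger).(*options.Swagger)
}

func main() {
	// An empty descriptor carries no annotation.
	fmt.Println(readSwagger(&descriptorpb.FileDescriptorProto{}) == nil) // true
}
```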
+ // + // optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042; + E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[5] +) + +var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor + +var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{ + 0x0a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x29, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x7e, 0x0a, 0x11, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, + 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x3a, 0x86, 0x01, 0x0a, 0x13, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x7e, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 
0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x7b, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x45, 0x6e, 0x75, + 0x6d, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x74, + 0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, + 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, + 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.EnumOptions)(nil), // 3: google.protobuf.EnumOptions + (*descriptorpb.ServiceOptions)(nil), // 4: google.protobuf.ServiceOptions + (*descriptorpb.FieldOptions)(nil), // 5: google.protobuf.FieldOptions + 
(*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger + (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation + (*Schema)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Schema + (*EnumSchema)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + (*Tag)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*JSONSchema)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema +} +var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{ + 0, // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions + 1, // 1: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions + 2, // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions + 3, // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:extendee -> google.protobuf.EnumOptions + 4, // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions + 5, // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions + 6, // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger + 7, // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation + 8, // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema + 9, // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + 10, // 10: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag + 11, // 11: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 6, // [6:12] is the sub-list for extension type_name + 0, // [0:6] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_protoc_gen_openapiv2_options_annotations_proto_init() } +func file_protoc_gen_openapiv2_options_annotations_proto_init() { + if File_protoc_gen_openapiv2_options_annotations_proto != nil { + return + } + file_protoc_gen_openapiv2_options_openapiv2_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 6, + NumServices: 0, + }, + GoTypes: file_protoc_gen_openapiv2_options_annotations_proto_goTypes, + DependencyIndexes: file_protoc_gen_openapiv2_options_annotations_proto_depIdxs, + ExtensionInfos: file_protoc_gen_openapiv2_options_annotations_proto_extTypes, + }.Build() + File_protoc_gen_openapiv2_options_annotations_proto = out.File + file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = nil + file_protoc_gen_openapiv2_options_annotations_proto_goTypes = nil + file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto 
b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto new file mode 100644 index 00000000000..aecc5e709c3 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +package grpc.gateway.protoc_gen_openapiv2.options; + +import "google/protobuf/descriptor.proto"; +import "protoc-gen-openapiv2/options/openapiv2.proto"; + +option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"; + +extend google.protobuf.FileOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Swagger openapiv2_swagger = 1042; +} +extend google.protobuf.MethodOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Operation openapiv2_operation = 1042; +} +extend google.protobuf.MessageOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Schema openapiv2_schema = 1042; +} +extend google.protobuf.EnumOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + EnumSchema openapiv2_enum = 1042; +} +extend google.protobuf.ServiceOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Tag openapiv2_tag = 1042; +} +extend google.protobuf.FieldOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + JSONSchema openapiv2_field = 1042; +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go new file mode 100644 index 00000000000..b570167836d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go @@ -0,0 +1,269 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.0 +// protoc (unknown) +// source: protoc-gen-openapiv2/options/annotations.proto + +//go:build protoopaque + +package options + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*Swagger)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger", + Tag: "bytes,1042,opt,name=openapiv2_swagger", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation", + Tag: "bytes,1042,opt,name=openapiv2_operation", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema", + Tag: "bytes,1042,opt,name=openapiv2_schema", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*EnumSchema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum", + Tag: "bytes,1042,opt,name=openapiv2_enum", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*Tag)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag", + Tag: "bytes,1042,opt,name=openapiv2_tag", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*JSONSchema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field", + Tag: "bytes,1042,opt,name=openapiv2_field", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Swagger openapiv2_swagger = 1042; + E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MethodOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Operation openapiv2_operation = 1042; + E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Schema openapiv2_schema = 1042; + E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2] +) + +// Extension fields to descriptorpb.EnumOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. 
It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.EnumSchema openapiv2_enum = 1042; + E_Openapiv2Enum = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3] +) + +// Extension fields to descriptorpb.ServiceOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042; + E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4] +) + +// Extension fields to descriptorpb.FieldOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042; + E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[5] +) + +var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor + +var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{ + 0x0a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x29, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x7e, 0x0a, 0x11, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, + 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x3a, 0x86, 0x01, 0x0a, 0x13, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x7e, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x7b, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x45, 0x6e, 0x75, + 0x6d, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x74, + 0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, + 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 
0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, + 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.EnumOptions)(nil), // 3: google.protobuf.EnumOptions + (*descriptorpb.ServiceOptions)(nil), // 4: google.protobuf.ServiceOptions + (*descriptorpb.FieldOptions)(nil), // 5: google.protobuf.FieldOptions + (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger + (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation + (*Schema)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Schema + (*EnumSchema)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + (*Tag)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*JSONSchema)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema +} +var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{ + 0, // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions + 1, // 1: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions + 2, // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions + 3, // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:extendee -> google.protobuf.EnumOptions + 4, // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions + 5, // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions + 6, // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger + 7, // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation + 8, // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema + 9, // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + 10, // 10: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag + 11, // 11: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 6, // [6:12] is the sub-list for extension type_name + 0, // [0:6] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_protoc_gen_openapiv2_options_annotations_proto_init() } +func file_protoc_gen_openapiv2_options_annotations_proto_init() { + if File_protoc_gen_openapiv2_options_annotations_proto != nil { + return + } + 
file_protoc_gen_openapiv2_options_openapiv2_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 6, + NumServices: 0, + }, + GoTypes: file_protoc_gen_openapiv2_options_annotations_proto_goTypes, + DependencyIndexes: file_protoc_gen_openapiv2_options_annotations_proto_depIdxs, + ExtensionInfos: file_protoc_gen_openapiv2_options_annotations_proto_extTypes, + }.Build() + File_protoc_gen_openapiv2_options_annotations_proto = out.File + file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = nil + file_protoc_gen_openapiv2_options_annotations_proto_goTypes = nil + file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml new file mode 100644 index 00000000000..07dfb958f1e --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml @@ -0,0 +1,7 @@ +version: v2 +plugins: + - remote: buf.build/protocolbuffers/go:v1.36.0 + out: . + opt: + - paths=source_relative + - default_api_level=API_HYBRID diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go new file mode 100644 index 00000000000..3a34e664e0a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go @@ -0,0 +1,4263 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.0 +// protoc (unknown) +// source: protoc-gen-openapiv2/options/openapiv2.proto + +//go:build !protoopaque + +package options + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Scheme describes the schemes supported by the OpenAPI Swagger +// and Operation objects. +type Scheme int32 + +const ( + Scheme_UNKNOWN Scheme = 0 + Scheme_HTTP Scheme = 1 + Scheme_HTTPS Scheme = 2 + Scheme_WS Scheme = 3 + Scheme_WSS Scheme = 4 +) + +// Enum value maps for Scheme. 
+var ( + Scheme_name = map[int32]string{ + 0: "UNKNOWN", + 1: "HTTP", + 2: "HTTPS", + 3: "WS", + 4: "WSS", + } + Scheme_value = map[string]int32{ + "UNKNOWN": 0, + "HTTP": 1, + "HTTPS": 2, + "WS": 3, + "WSS": 4, + } +) + +func (x Scheme) Enum() *Scheme { + p := new(Scheme) + *p = x + return p +} + +func (x Scheme) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Scheme) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0].Descriptor() +} + +func (Scheme) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0] +} + +func (x Scheme) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// `Type` is a supported HTTP header type. +// See https://swagger.io/specification/v2/#parameterType. +type HeaderParameter_Type int32 + +const ( + HeaderParameter_UNKNOWN HeaderParameter_Type = 0 + HeaderParameter_STRING HeaderParameter_Type = 1 + HeaderParameter_NUMBER HeaderParameter_Type = 2 + HeaderParameter_INTEGER HeaderParameter_Type = 3 + HeaderParameter_BOOLEAN HeaderParameter_Type = 4 +) + +// Enum value maps for HeaderParameter_Type. +var ( + HeaderParameter_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STRING", + 2: "NUMBER", + 3: "INTEGER", + 4: "BOOLEAN", + } + HeaderParameter_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STRING": 1, + "NUMBER": 2, + "INTEGER": 3, + "BOOLEAN": 4, + } +) + +func (x HeaderParameter_Type) Enum() *HeaderParameter_Type { + p := new(HeaderParameter_Type) + *p = x + return p +} + +func (x HeaderParameter_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HeaderParameter_Type) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1].Descriptor() +} + +func (HeaderParameter_Type) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1] +} + +func (x HeaderParameter_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type JSONSchema_JSONSchemaSimpleTypes int32 + +const ( + JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0 + JSONSchema_ARRAY JSONSchema_JSONSchemaSimpleTypes = 1 + JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2 + JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3 + JSONSchema_NULL JSONSchema_JSONSchemaSimpleTypes = 4 + JSONSchema_NUMBER JSONSchema_JSONSchemaSimpleTypes = 5 + JSONSchema_OBJECT JSONSchema_JSONSchemaSimpleTypes = 6 + JSONSchema_STRING JSONSchema_JSONSchemaSimpleTypes = 7 +) + +// Enum value maps for JSONSchema_JSONSchemaSimpleTypes. 
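[Editor's note: generated name/value maps like Scheme_name and Scheme_value above (and the JSONSchema simple-type maps below) let callers round-trip enum literals without reflection. A small illustrative sketch:]

```go
package main

import (
	"fmt"

	options "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
)

func main() {
	// Number -> name via Scheme_name, name -> enum via Scheme_value.
	fmt.Println(options.Scheme_name[int32(options.Scheme_HTTPS)]) // HTTPS
	fmt.Println(options.Scheme(options.Scheme_value["WSS"]))      // WSS
}
```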
+var ( + JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ARRAY", + 2: "BOOLEAN", + 3: "INTEGER", + 4: "NULL", + 5: "NUMBER", + 6: "OBJECT", + 7: "STRING", + } + JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{ + "UNKNOWN": 0, + "ARRAY": 1, + "BOOLEAN": 2, + "INTEGER": 3, + "NULL": 4, + "NUMBER": 5, + "OBJECT": 6, + "STRING": 7, + } +) + +func (x JSONSchema_JSONSchemaSimpleTypes) Enum() *JSONSchema_JSONSchemaSimpleTypes { + p := new(JSONSchema_JSONSchemaSimpleTypes) + *p = x + return p +} + +func (x JSONSchema_JSONSchemaSimpleTypes) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (JSONSchema_JSONSchemaSimpleTypes) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2].Descriptor() +} + +func (JSONSchema_JSONSchemaSimpleTypes) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2] +} + +func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The type of the security scheme. Valid values are "basic", +// "apiKey" or "oauth2". +type SecurityScheme_Type int32 + +const ( + SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0 + SecurityScheme_TYPE_BASIC SecurityScheme_Type = 1 + SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2 + SecurityScheme_TYPE_OAUTH2 SecurityScheme_Type = 3 +) + +// Enum value maps for SecurityScheme_Type. +var ( + SecurityScheme_Type_name = map[int32]string{ + 0: "TYPE_INVALID", + 1: "TYPE_BASIC", + 2: "TYPE_API_KEY", + 3: "TYPE_OAUTH2", + } + SecurityScheme_Type_value = map[string]int32{ + "TYPE_INVALID": 0, + "TYPE_BASIC": 1, + "TYPE_API_KEY": 2, + "TYPE_OAUTH2": 3, + } +) + +func (x SecurityScheme_Type) Enum() *SecurityScheme_Type { + p := new(SecurityScheme_Type) + *p = x + return p +} + +func (x SecurityScheme_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_Type) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3].Descriptor() +} + +func (SecurityScheme_Type) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3] +} + +func (x SecurityScheme_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The location of the API key. Valid values are "query" or "header". +type SecurityScheme_In int32 + +const ( + SecurityScheme_IN_INVALID SecurityScheme_In = 0 + SecurityScheme_IN_QUERY SecurityScheme_In = 1 + SecurityScheme_IN_HEADER SecurityScheme_In = 2 +) + +// Enum value maps for SecurityScheme_In. 
+var ( + SecurityScheme_In_name = map[int32]string{ + 0: "IN_INVALID", + 1: "IN_QUERY", + 2: "IN_HEADER", + } + SecurityScheme_In_value = map[string]int32{ + "IN_INVALID": 0, + "IN_QUERY": 1, + "IN_HEADER": 2, + } +) + +func (x SecurityScheme_In) Enum() *SecurityScheme_In { + p := new(SecurityScheme_In) + *p = x + return p +} + +func (x SecurityScheme_In) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_In) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4].Descriptor() +} + +func (SecurityScheme_In) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4] +} + +func (x SecurityScheme_In) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The flow used by the OAuth2 security scheme. Valid values are +// "implicit", "password", "application" or "accessCode". +type SecurityScheme_Flow int32 + +const ( + SecurityScheme_FLOW_INVALID SecurityScheme_Flow = 0 + SecurityScheme_FLOW_IMPLICIT SecurityScheme_Flow = 1 + SecurityScheme_FLOW_PASSWORD SecurityScheme_Flow = 2 + SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3 + SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4 +) + +// Enum value maps for SecurityScheme_Flow. +var ( + SecurityScheme_Flow_name = map[int32]string{ + 0: "FLOW_INVALID", + 1: "FLOW_IMPLICIT", + 2: "FLOW_PASSWORD", + 3: "FLOW_APPLICATION", + 4: "FLOW_ACCESS_CODE", + } + SecurityScheme_Flow_value = map[string]int32{ + "FLOW_INVALID": 0, + "FLOW_IMPLICIT": 1, + "FLOW_PASSWORD": 2, + "FLOW_APPLICATION": 3, + "FLOW_ACCESS_CODE": 4, + } +) + +func (x SecurityScheme_Flow) Enum() *SecurityScheme_Flow { + p := new(SecurityScheme_Flow) + *p = x + return p +} + +func (x SecurityScheme_Flow) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_Flow) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5].Descriptor() +} + +func (SecurityScheme_Flow) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5] +} + +func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// schemes: HTTPS; +// consumes: "application/json"; +// produces: "application/json"; +// }; +type Swagger struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // Specifies the OpenAPI Specification version being used. It can be + // used by the OpenAPI UI and other clients to interpret the API listing. The + // value MUST be "2.0". + Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` + // Provides metadata about the API. The metadata can be used by the + // clients if needed. 
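[Editor's note on API shape: the protogen:"hybrid.v1" tag above means these messages expose both exported struct fields (under the default !protoopaque build) and the Set*/Get* accessors that appear further below in this file. A brief hedged sketch of building the annotation programmatically:]

```go
package main

import (
	"fmt"

	options "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
)

func main() {
	sw := &options.Swagger{}
	sw.SetSwagger("2.0") // the spec version MUST be "2.0"
	sw.SetSchemes([]options.Scheme{options.Scheme_HTTPS})
	sw.SetConsumes([]string{"application/json"})
	fmt.Println(sw.GetSwagger(), sw.GetSchemes()) // 2.0 [HTTPS]
}
```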
+ Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + // The host (name or ip) serving the API. This MUST be the host only and does + // not include the scheme or sub-paths. It MAY include a port. If the host is + // not included, the host serving the documentation is to be used (including + // the port). The host does not support path templating. + Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` + // The base path on which the API is served, which is relative to the host. If + // it is not included, the API is served directly under the host. The value + // MUST start with a leading slash (/). The basePath does not support path + // templating. + // Note that using `base_path` does not change the endpoint paths that are + // generated in the resulting OpenAPI file. If you wish to use `base_path` + // with relatively generated OpenAPI paths, the `base_path` prefix must be + // manually removed from your `google.api.http` paths and your code changed to + // serve the API from the `base_path`. + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` + // The transfer protocol of the API. Values MUST be from the list: "http", + // "https", "ws", "wss". If `schemes` is not included, the default scheme to + // be used is the one used to access the OpenAPI definition itself. + Schemes []Scheme `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"` + // A list of MIME types the APIs can consume. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` + // A list of MIME types the APIs can produce. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + // An object to hold responses that can be used across operations. This + // property does not define global responses for all operations. + Responses map[string]*Response `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Security scheme definitions that can be used across the specification. + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` + // A declaration of which security schemes are applied for the API as a whole. + // The list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). + // Individual operations can override this definition. + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []*Tag `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty"` + // Additional external documentation.
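+ // For example (an editorial sketch, not upstream documentation, using the
+ // ExternalDocumentation_builder defined later in this file):
+ //
+ //	ExternalDocs: ExternalDocumentation_builder{
+ //		Description: "More about gRPC-Gateway",
+ //		Url:         "https://github.com/grpc-ecosystem/grpc-gateway",
+ //	}.Build()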
+ ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Swagger) Reset() { + *x = Swagger{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Swagger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Swagger) ProtoMessage() {} + +func (x *Swagger) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Swagger) GetSwagger() string { + if x != nil { + return x.Swagger + } + return "" +} + +func (x *Swagger) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} + +func (x *Swagger) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *Swagger) GetBasePath() string { + if x != nil { + return x.BasePath + } + return "" +} + +func (x *Swagger) GetSchemes() []Scheme { + if x != nil { + return x.Schemes + } + return nil +} + +func (x *Swagger) GetConsumes() []string { + if x != nil { + return x.Consumes + } + return nil +} + +func (x *Swagger) GetProduces() []string { + if x != nil { + return x.Produces + } + return nil +} + +func (x *Swagger) GetResponses() map[string]*Response { + if x != nil { + return x.Responses + } + return nil +} + +func (x *Swagger) GetSecurityDefinitions() *SecurityDefinitions { + if x != nil { + return x.SecurityDefinitions + } + return nil +} + +func (x *Swagger) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security + } + return nil +} + +func (x *Swagger) GetTags() []*Tag { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Swagger) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Swagger) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Swagger) SetSwagger(v string) { + x.Swagger = v +} + +func (x *Swagger) SetInfo(v *Info) { + x.Info = v +} + +func (x *Swagger) SetHost(v string) { + x.Host = v +} + +func (x *Swagger) SetBasePath(v string) { + x.BasePath = v +} + +func (x *Swagger) SetSchemes(v []Scheme) { + x.Schemes = v +} + +func (x *Swagger) SetConsumes(v []string) { + x.Consumes = v +} + +func (x *Swagger) SetProduces(v []string) { + x.Produces = v +} + +func (x *Swagger) SetResponses(v map[string]*Response) { + x.Responses = v +} + +func (x *Swagger) SetSecurityDefinitions(v *SecurityDefinitions) { + x.SecurityDefinitions = v +} + +func (x *Swagger) SetSecurity(v []*SecurityRequirement) { + x.Security = v +} + +func (x *Swagger) SetTags(v []*Tag) { + x.Tags = v +} + +func (x *Swagger) SetExternalDocs(v *ExternalDocumentation) { + x.ExternalDocs = v +} + +func (x *Swagger) SetExtensions(v 
map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Swagger) HasInfo() bool { + if x == nil { + return false + } + return x.Info != nil +} + +func (x *Swagger) HasSecurityDefinitions() bool { + if x == nil { + return false + } + return x.SecurityDefinitions != nil +} + +func (x *Swagger) HasExternalDocs() bool { + if x == nil { + return false + } + return x.ExternalDocs != nil +} + +func (x *Swagger) ClearInfo() { + x.Info = nil +} + +func (x *Swagger) ClearSecurityDefinitions() { + x.SecurityDefinitions = nil +} + +func (x *Swagger) ClearExternalDocs() { + x.ExternalDocs = nil +} + +type Swagger_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Specifies the OpenAPI Specification version being used. It can be + // used by the OpenAPI UI and other clients to interpret the API listing. The + // value MUST be "2.0". + Swagger string + // Provides metadata about the API. The metadata can be used by the + // clients if needed. + Info *Info + // The host (name or ip) serving the API. This MUST be the host only and does + // not include the scheme or sub-paths. It MAY include a port. If the host is + // not included, the host serving the documentation is to be used (including + // the port). The host does not support path templating. + Host string + // The base path on which the API is served, which is relative to the host. If + // it is not included, the API is served directly under the host. The value + // MUST start with a leading slash (/). The basePath does not support path + // templating. + // Note that using `base_path` does not change the endpoint paths that are + // generated in the resulting OpenAPI file. If you wish to use `base_path` + // with relatively generated OpenAPI paths, the `base_path` prefix must be + // manually removed from your `google.api.http` paths and your code changed to + // serve the API from the `base_path`. + BasePath string + // The transfer protocol of the API. Values MUST be from the list: "http", + // "https", "ws", "wss". If `schemes` is not included, the default scheme to + // be used is the one used to access the OpenAPI definition itself. + Schemes []Scheme + // A list of MIME types the APIs can consume. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Consumes []string + // A list of MIME types the APIs can produce. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Produces []string + // An object to hold responses that can be used across operations. This + // property does not define global responses for all operations. + Responses map[string]*Response + // Security scheme definitions that can be used across the specification. + SecurityDefinitions *SecurityDefinitions + // A declaration of which security schemes are applied for the API as a whole. + // The list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). + // Individual operations can override this definition. + Security []*SecurityRequirement + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []*Tag + // Additional external documentation.
+ ExternalDocs *ExternalDocumentation + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Swagger_builder) Build() *Swagger { + m0 := &Swagger{} + b, x := &b0, m0 + _, _ = b, x + x.Swagger = b.Swagger + x.Info = b.Info + x.Host = b.Host + x.BasePath = b.BasePath + x.Schemes = b.Schemes + x.Consumes = b.Consumes + x.Produces = b.Produces + x.Responses = b.Responses + x.SecurityDefinitions = b.SecurityDefinitions + x.Security = b.Security + x.Tags = b.Tags + x.ExternalDocs = b.ExternalDocs + x.Extensions = b.Extensions + return m0 +} + +// `Operation` is a representation of OpenAPI v2 specification's Operation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject +// +// Example: +// +// service EchoService { +// rpc Echo(SimpleMessage) returns (SimpleMessage) { +// option (google.api.http) = { +// get: "/v1/example/echo/{id}" +// }; +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { +// summary: "Get a message."; +// operation_id: "getMessage"; +// tags: "echo"; +// responses: { +// key: "200" +// value: { +// description: "OK"; +// } +// } +// }; +// } +// } +type Operation struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` + // A short summary of what the operation does. For maximum readability in the + // swagger-ui, this field SHOULD be less than 120 characters. + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + // A verbose explanation of the operation behavior. GFM syntax can be used for + // rich text representation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Additional external documentation for this operation. + ExternalDocs *ExternalDocumentation `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + // Unique string used to identify the operation. The id MUST be unique among + // all operations described in the API. Tools and libraries MAY use the + // operationId to uniquely identify an operation, therefore, it is recommended + // to follow common programming naming conventions. + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` + // A list of MIME types the operation can consume. This overrides the consumes + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` + // A list of MIME types the operation can produce. This overrides the produces + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + // The list of possible responses as they are returned from executing this + // operation. 
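+ // For example (an editorial sketch, not upstream documentation, using the
+ // Response_builder defined later in this file):
+ //
+ //	Responses: map[string]*Response{
+ //		"200": Response_builder{Description: "OK"}.Build(),
+ //	}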
+ Responses map[string]*Response `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The transfer protocol for the operation. Values MUST be from the list: + // "http", "https", "ws", "wss". The value overrides the OpenAPI Object + // schemes definition. + Schemes []Scheme `protobuf:"varint,10,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"` + // Declares this operation to be deprecated. Usage of the declared operation + // should be avoided. Default value is false. + Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + // A declaration of which security schemes are applied for this operation. The + // list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). This + // definition overrides any declared top-level security. To remove a top-level + // security declaration, an empty array can be used. + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Custom parameters such as HTTP request headers. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/ + // and https://swagger.io/specification/v2/#parameter-object.
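+ // For example (an editorial sketch, not upstream documentation;
+ // HeaderParameter_STRING is assumed to be among the HeaderParameter_Type
+ // values generated from this proto):
+ //
+ //	Parameters: Parameters_builder{
+ //		Headers: []*HeaderParameter{
+ //			HeaderParameter_builder{
+ //				Name:     "X-Request-Id",
+ //				Type:     HeaderParameter_STRING,
+ //				Required: true,
+ //			}.Build(),
+ //		},
+ //	}.Build()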
+ Parameters *Parameters `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Operation) Reset() { + *x = Operation{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Operation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Operation) ProtoMessage() {} + +func (x *Operation) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Operation) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Operation) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Operation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Operation) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Operation) GetOperationId() string { + if x != nil { + return x.OperationId + } + return "" +} + +func (x *Operation) GetConsumes() []string { + if x != nil { + return x.Consumes + } + return nil +} + +func (x *Operation) GetProduces() []string { + if x != nil { + return x.Produces + } + return nil +} + +func (x *Operation) GetResponses() map[string]*Response { + if x != nil { + return x.Responses + } + return nil +} + +func (x *Operation) GetSchemes() []Scheme { + if x != nil { + return x.Schemes + } + return nil +} + +func (x *Operation) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +func (x *Operation) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security + } + return nil +} + +func (x *Operation) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Operation) GetParameters() *Parameters { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Operation) SetTags(v []string) { + x.Tags = v +} + +func (x *Operation) SetSummary(v string) { + x.Summary = v +} + +func (x *Operation) SetDescription(v string) { + x.Description = v +} + +func (x *Operation) SetExternalDocs(v *ExternalDocumentation) { + x.ExternalDocs = v +} + +func (x *Operation) SetOperationId(v string) { + x.OperationId = v +} + +func (x *Operation) SetConsumes(v []string) { + x.Consumes = v +} + +func (x *Operation) SetProduces(v []string) { + x.Produces = v +} + +func (x *Operation) SetResponses(v map[string]*Response) { + x.Responses = v +} + +func (x *Operation) SetSchemes(v []Scheme) { + x.Schemes = v +} + +func (x *Operation) SetDeprecated(v bool) { + x.Deprecated = v +} + +func (x *Operation) SetSecurity(v []*SecurityRequirement) { + x.Security = v +} + +func (x *Operation) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Operation) SetParameters(v *Parameters) { + x.Parameters = v +} + +func (x *Operation) HasExternalDocs() bool { + if x == nil { + return false + } + return x.ExternalDocs != nil +} + +func (x *Operation) HasParameters() bool { + if x == nil { + return false + } + return x.Parameters != nil +} + +func (x *Operation) ClearExternalDocs() { + x.ExternalDocs = nil +} + +func (x *Operation) 
ClearParameters() { + x.Parameters = nil +} + +type Operation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []string + // A short summary of what the operation does. For maximum readability in the + // swagger-ui, this field SHOULD be less than 120 characters. + Summary string + // A verbose explanation of the operation behavior. GFM syntax can be used for + // rich text representation. + Description string + // Additional external documentation for this operation. + ExternalDocs *ExternalDocumentation + // Unique string used to identify the operation. The id MUST be unique among + // all operations described in the API. Tools and libraries MAY use the + // operationId to uniquely identify an operation, therefore, it is recommended + // to follow common programming naming conventions. + OperationId string + // A list of MIME types the operation can consume. This overrides the consumes + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Consumes []string + // A list of MIME types the operation can produce. This overrides the produces + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Produces []string + // The list of possible responses as they are returned from executing this + // operation. + Responses map[string]*Response + // The transfer protocol for the operation. Values MUST be from the list: + // "http", "https", "ws", "wss". The value overrides the OpenAPI Object + // schemes definition. + Schemes []Scheme + // Declares this operation to be deprecated. Usage of the declared operation + // should be avoided. Default value is false. + Deprecated bool + // A declaration of which security schemes are applied for this operation. The + // list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). This + // definition overrides any declared top-level security. To remove a top-level + // security declaration, an empty array can be used. + Security []*SecurityRequirement + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value + // Custom parameters such as HTTP request headers. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/ + // and https://swagger.io/specification/v2/#parameter-object. + Parameters *Parameters +} + +func (b0 Operation_builder) Build() *Operation { + m0 := &Operation{} + b, x := &b0, m0 + _, _ = b, x + x.Tags = b.Tags + x.Summary = b.Summary + x.Description = b.Description + x.ExternalDocs = b.ExternalDocs + x.OperationId = b.OperationId + x.Consumes = b.Consumes + x.Produces = b.Produces + x.Responses = b.Responses + x.Schemes = b.Schemes + x.Deprecated = b.Deprecated + x.Security = b.Security + x.Extensions = b.Extensions + x.Parameters = b.Parameters + return m0 +} + +// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only +// allow header parameters to be set here since we do not want users specifying custom non-header +// parameters beyond those inferred from the Protobuf schema. +// See: https://swagger.io/specification/v2/#parameter-object +type Parameters struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `Headers` is one or more HTTP header parameters. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters + Headers []*HeaderParameter `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Parameters) Reset() { + *x = Parameters{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Parameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Parameters) ProtoMessage() {} + +func (x *Parameters) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Parameters) GetHeaders() []*HeaderParameter { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Parameters) SetHeaders(v []*HeaderParameter) { + x.Headers = v +} + +type Parameters_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Headers` is one or more HTTP header parameters. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters + Headers []*HeaderParameter +} + +func (b0 Parameters_builder) Build() *Parameters { + m0 := &Parameters{} + b, x := &b0, m0 + _, _ = b, x + x.Headers = b.Headers + return m0 +} + +// `HeaderParameter` is an HTTP header parameter. +// See: https://swagger.io/specification/v2/#parameter-object +type HeaderParameter struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `Name` is the header name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // `Description` is a short description of the header. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + // See: https://swagger.io/specification/v2/#parameterType. + Type HeaderParameter_Type `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter_Type" json:"type,omitempty"` + // `Format` The extending format for the previously mentioned type.
+ Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"` + // `Required` indicates whether the header is required. + Required bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HeaderParameter) Reset() { + *x = HeaderParameter{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HeaderParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderParameter) ProtoMessage() {} + +func (x *HeaderParameter) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *HeaderParameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *HeaderParameter) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *HeaderParameter) GetType() HeaderParameter_Type { + if x != nil { + return x.Type + } + return HeaderParameter_UNKNOWN +} + +func (x *HeaderParameter) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *HeaderParameter) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *HeaderParameter) SetName(v string) { + x.Name = v +} + +func (x *HeaderParameter) SetDescription(v string) { + x.Description = v +} + +func (x *HeaderParameter) SetType(v HeaderParameter_Type) { + x.Type = v +} + +func (x *HeaderParameter) SetFormat(v string) { + x.Format = v +} + +func (x *HeaderParameter) SetRequired(v bool) { + x.Required = v +} + +type HeaderParameter_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Name` is the header name. + Name string + // `Description` is a short description of the header. + Description string + // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + // See: https://swagger.io/specification/v2/#parameterType. + Type HeaderParameter_Type + // `Format` The extending format for the previously mentioned type. + Format string + // `Required` indicates whether the header is required. + Required bool +} + +func (b0 HeaderParameter_builder) Build() *HeaderParameter { + m0 := &HeaderParameter{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Description = b.Description + x.Type = b.Type + x.Format = b.Format + x.Required = b.Required + return m0 +} + +// `Header` is a representation of OpenAPI v2 specification's Header object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject +type Header struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `Description` is a short description of the header. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // `Format` The extending format for the previously mentioned type.
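+ // For example (editorial note): the OpenAPI v2 data-type table pairs type
+ // "integer" with format "int32" or "int64", and type "string" with formats
+ // such as "byte" or "date-time".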
+ Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"` + // `Default` Declares the value of the header that the server will use if none is provided. + // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. + // Unlike JSON Schema this value MUST conform to the defined type for the header. + Default string `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"` + // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. + Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Header) Reset() { + *x = Header{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Header) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Header) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Header) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *Header) GetDefault() string { + if x != nil { + return x.Default + } + return "" +} + +func (x *Header) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *Header) SetDescription(v string) { + x.Description = v +} + +func (x *Header) SetType(v string) { + x.Type = v +} + +func (x *Header) SetFormat(v string) { + x.Format = v +} + +func (x *Header) SetDefault(v string) { + x.Default = v +} + +func (x *Header) SetPattern(v string) { + x.Pattern = v +} + +type Header_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Description` is a short description of the header. + Description string + // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + Type string + // `Format` The extending format for the previously mentioned type. + Format string + // `Default` Declares the value of the header that the server will use if none is provided. + // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. + // Unlike JSON Schema this value MUST conform to the defined type for the header. + Default string + // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. + Pattern string +} + +func (b0 Header_builder) Build() *Header { + m0 := &Header{} + b, x := &b0, m0 + _, _ = b, x + x.Description = b.Description + x.Type = b.Type + x.Format = b.Format + x.Default = b.Default + x.Pattern = b.Pattern + return m0 +} + +// `Response` is a representation of OpenAPI v2 specification's Response object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject +type Response struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // `Description` is a short description of the response. 
+ // GFM syntax can be used for rich text representation. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // `Schema` optionally defines the structure of the response. + // If `Schema` is not provided, it means there is no content to the response. + Schema *Schema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` + // `Headers` A list of headers that are sent with the response. + // `Header` name is expected to be a string in the canonical format of the MIME header key + // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey + Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // `Examples` gives per-mimetype response examples. + // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object + Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Response) Reset() { + *x = Response{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Response) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Response) GetSchema() *Schema { + if x != nil { + return x.Schema + } + return nil +} + +func (x *Response) GetHeaders() map[string]*Header { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Response) GetExamples() map[string]string { + if x != nil { + return x.Examples + } + return nil +} + +func (x *Response) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Response) SetDescription(v string) { + x.Description = v +} + +func (x *Response) SetSchema(v *Schema) { + x.Schema = v +} + +func (x *Response) SetHeaders(v map[string]*Header) { + x.Headers = v +} + +func (x *Response) SetExamples(v map[string]string) { + x.Examples = v +} + +func (x *Response) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Response) HasSchema() bool { + if x == nil { + return false + } + return x.Schema != nil +} + +func (x *Response) ClearSchema() { + x.Schema = nil +} + +type Response_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Description` is a short description of the response. 
+ // GFM syntax can be used for rich text representation. + Description string + // `Schema` optionally defines the structure of the response. + // If `Schema` is not provided, it means there is no content to the response. + Schema *Schema + // `Headers` A list of headers that are sent with the response. + // `Header` name is expected to be a string in the canonical format of the MIME header key + // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey + Headers map[string]*Header + // `Examples` gives per-mimetype response examples. + // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object + Examples map[string]string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Response_builder) Build() *Response { + m0 := &Response{} + b, x := &b0, m0 + _, _ = b, x + x.Description = b.Description + x.Schema = b.Schema + x.Headers = b.Headers + x.Examples = b.Examples + x.Extensions = b.Extensions + return m0 +} + +// `Info` is a representation of OpenAPI v2 specification's Info object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// ... +// }; +type Info struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // The title of the application. + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + // A short description of the application. GFM syntax can be used for rich + // text representation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The Terms of Service for the API. + TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` + // The contact information for the exposed API. + Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"` + // The license information for the exposed API. + License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"` + // Provides the version of the application API (not to be confused + // with the specification version). + Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
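+ // For example (an editorial sketch, not upstream documentation; structpb is
+ // the google.golang.org/protobuf/types/known/structpb package this file
+ // already imports for extension values):
+ //
+ //	Extensions: map[string]*structpb.Value{
+ //		"x-company": structpb.NewStringValue("Acme"),
+ //	}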
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Info) Reset() { + *x = Info{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Info) ProtoMessage() {} + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Info) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *Info) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Info) GetTermsOfService() string { + if x != nil { + return x.TermsOfService + } + return "" +} + +func (x *Info) GetContact() *Contact { + if x != nil { + return x.Contact + } + return nil +} + +func (x *Info) GetLicense() *License { + if x != nil { + return x.License + } + return nil +} + +func (x *Info) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Info) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Info) SetTitle(v string) { + x.Title = v +} + +func (x *Info) SetDescription(v string) { + x.Description = v +} + +func (x *Info) SetTermsOfService(v string) { + x.TermsOfService = v +} + +func (x *Info) SetContact(v *Contact) { + x.Contact = v +} + +func (x *Info) SetLicense(v *License) { + x.License = v +} + +func (x *Info) SetVersion(v string) { + x.Version = v +} + +func (x *Info) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Info) HasContact() bool { + if x == nil { + return false + } + return x.Contact != nil +} + +func (x *Info) HasLicense() bool { + if x == nil { + return false + } + return x.License != nil +} + +func (x *Info) ClearContact() { + x.Contact = nil +} + +func (x *Info) ClearLicense() { + x.License = nil +} + +type Info_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The title of the application. + Title string + // A short description of the application. GFM syntax can be used for rich + // text representation. + Description string + // The Terms of Service for the API. + TermsOfService string + // The contact information for the exposed API. + Contact *Contact + // The license information for the exposed API. + License *License + // Provides the version of the application API (not to be confused + // with the specification version). + Version string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Info_builder) Build() *Info { + m0 := &Info{} + b, x := &b0, m0 + _, _ = b, x + x.Title = b.Title + x.Description = b.Description + x.TermsOfService = b.TermsOfService + x.Contact = b.Contact + x.License = b.License + x.Version = b.Version + x.Extensions = b.Extensions + return m0 +} + +// `Contact` is a representation of OpenAPI v2 specification's Contact object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// ... +// }; +// ... +// }; +type Contact struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // The identifying name of the contact person/organization. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The URL pointing to the contact information. MUST be in the format of a + // URL. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + // The email address of the contact person/organization. MUST be in the format + // of an email address. + Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Contact) Reset() { + *x = Contact{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Contact) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Contact) ProtoMessage() {} + +func (x *Contact) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Contact) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Contact) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Contact) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *Contact) SetName(v string) { + x.Name = v +} + +func (x *Contact) SetUrl(v string) { + x.Url = v +} + +func (x *Contact) SetEmail(v string) { + x.Email = v +} + +type Contact_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The identifying name of the contact person/organization. + Name string + // The URL pointing to the contact information. MUST be in the format of a + // URL. + Url string + // The email address of the contact person/organization. MUST be in the format + // of an email address. + Email string +} + +func (b0 Contact_builder) Build() *Contact { + m0 := &Contact{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Url = b.Url + x.Email = b.Email + return m0 +} + +// `License` is a representation of OpenAPI v2 specification's License object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... 
+// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// ... +// }; +// ... +// }; +type License struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // The license name used for the API. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A URL to the license used for the API. MUST be in the format of a URL. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *License) Reset() { + *x = License{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *License) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*License) ProtoMessage() {} + +func (x *License) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *License) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *License) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *License) SetName(v string) { + x.Name = v +} + +func (x *License) SetUrl(v string) { + x.Url = v +} + +type License_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The license name used for the API. + Name string + // A URL to the license used for the API. MUST be in the format of a URL. + Url string +} + +func (b0 License_builder) Build() *License { + m0 := &License{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Url = b.Url + return m0 +} + +// `ExternalDocumentation` is a representation of OpenAPI v2 specification's +// ExternalDocumentation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// ... +// external_docs: { +// description: "More about gRPC-Gateway"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// } +// ... +// }; +type ExternalDocumentation struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // A short description of the target documentation. GFM syntax can be used for + // rich text representation. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The URL for the target documentation. Value MUST be in the format + // of a URL. 
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExternalDocumentation) Reset() { + *x = ExternalDocumentation{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExternalDocumentation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalDocumentation) ProtoMessage() {} + +func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ExternalDocumentation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *ExternalDocumentation) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *ExternalDocumentation) SetDescription(v string) { + x.Description = v +} + +func (x *ExternalDocumentation) SetUrl(v string) { + x.Url = v +} + +type ExternalDocumentation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A short description of the target documentation. GFM syntax can be used for + // rich text representation. + Description string + // The URL for the target documentation. Value MUST be in the format + // of a URL. + Url string +} + +func (b0 ExternalDocumentation_builder) Build() *ExternalDocumentation { + m0 := &ExternalDocumentation{} + b, x := &b0, m0 + _, _ = b, x + x.Description = b.Description + x.Url = b.Url + return m0 +} + +// `Schema` is a representation of OpenAPI v2 specification's Schema object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +type Schema struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + JsonSchema *JSONSchema `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"` + // Adds support for polymorphism. The discriminator is the schema property + // name that is used to differentiate between other schema that inherit this + // schema. The property name used MUST be defined at this schema and it MUST + // be in the required property list. When used, the value MUST be the name of + // this schema or any schema that inherits it. + Discriminator string `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"` + // Relevant only for Schema "properties" definitions. Declares the property as + // "read only". This means that it MAY be sent as part of a response but MUST + // NOT be sent as part of the request. Properties marked as readOnly being + // true SHOULD NOT be in the required list of the defined schema. Default + // value is false. + ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + // A free-form property to include an example of an instance for this schema in JSON. + // This is copied verbatim to the output. 
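+ // For instance (an editorial sketch; the inner quotes must be escaped because
+ // the string is emitted verbatim):
+ //
+ //	example: "{\"name\": \"widget\", \"id\": 1}"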
+ Example string `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Schema) Reset() { + *x = Schema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Schema) GetJsonSchema() *JSONSchema { + if x != nil { + return x.JsonSchema + } + return nil +} + +func (x *Schema) GetDiscriminator() string { + if x != nil { + return x.Discriminator + } + return "" +} + +func (x *Schema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly + } + return false +} + +func (x *Schema) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Schema) GetExample() string { + if x != nil { + return x.Example + } + return "" +} + +func (x *Schema) SetJsonSchema(v *JSONSchema) { + x.JsonSchema = v +} + +func (x *Schema) SetDiscriminator(v string) { + x.Discriminator = v +} + +func (x *Schema) SetReadOnly(v bool) { + x.ReadOnly = v +} + +func (x *Schema) SetExternalDocs(v *ExternalDocumentation) { + x.ExternalDocs = v +} + +func (x *Schema) SetExample(v string) { + x.Example = v +} + +func (x *Schema) HasJsonSchema() bool { + if x == nil { + return false + } + return x.JsonSchema != nil +} + +func (x *Schema) HasExternalDocs() bool { + if x == nil { + return false + } + return x.ExternalDocs != nil +} + +func (x *Schema) ClearJsonSchema() { + x.JsonSchema = nil +} + +func (x *Schema) ClearExternalDocs() { + x.ExternalDocs = nil +} + +type Schema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + JsonSchema *JSONSchema + // Adds support for polymorphism. The discriminator is the schema property + // name that is used to differentiate between other schema that inherit this + // schema. The property name used MUST be defined at this schema and it MUST + // be in the required property list. When used, the value MUST be the name of + // this schema or any schema that inherits it. + Discriminator string + // Relevant only for Schema "properties" definitions. Declares the property as + // "read only". This means that it MAY be sent as part of a response but MUST + // NOT be sent as part of the request. Properties marked as readOnly being + // true SHOULD NOT be in the required list of the defined schema. Default + // value is false. + ReadOnly bool + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation + // A free-form property to include an example of an instance for this schema in JSON. + // This is copied verbatim to the output. + Example string +} + +func (b0 Schema_builder) Build() *Schema { + m0 := &Schema{} + b, x := &b0, m0 + _, _ = b, x + x.JsonSchema = b.JsonSchema + x.Discriminator = b.Discriminator + x.ReadOnly = b.ReadOnly + x.ExternalDocs = b.ExternalDocs + x.Example = b.Example + return m0 +} + +// `EnumSchema` is a subset of fields from the OpenAPI v2 specification's Schema object.
+// Only fields that are applicable to Enums are included +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = { +// ... +// title: "MyEnum"; +// description:"This is my nice enum"; +// example: "ZERO"; +// required: true; +// ... +// }; +type EnumSchema struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // A short description of the schema. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Default string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"` + // The title of the schema. + Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + ReadOnly bool `protobuf:"varint,5,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Example string `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"` + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // + // `ref: ".google.protobuf.Timestamp"`. + Ref string `protobuf:"bytes,8,opt,name=ref,proto3" json:"ref,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EnumSchema) Reset() { + *x = EnumSchema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EnumSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumSchema) ProtoMessage() {} + +func (x *EnumSchema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EnumSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *EnumSchema) GetDefault() string { + if x != nil { + return x.Default + } + return "" +} + +func (x *EnumSchema) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *EnumSchema) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *EnumSchema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly + } + return false +} + +func (x *EnumSchema) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *EnumSchema) GetExample() string { + if x != nil { + return x.Example + } + return "" +} + +func (x *EnumSchema) GetRef() string { + if x != nil { + return x.Ref + } + return "" +} + +func (x *EnumSchema) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *EnumSchema) SetDescription(v string) { + x.Description = v +} + +func (x *EnumSchema) SetDefault(v string) { + x.Default = v +} + +func (x *EnumSchema) SetTitle(v string) { + x.Title = v +} + +func (x *EnumSchema) SetRequired(v bool) { + x.Required = v +} + +func (x *EnumSchema) SetReadOnly(v bool) { + x.ReadOnly = v +} + +func (x *EnumSchema) SetExternalDocs(v *ExternalDocumentation) { + x.ExternalDocs = v +} + +func (x *EnumSchema) SetExample(v string) { + x.Example = v +} + +func (x *EnumSchema) SetRef(v string) { + x.Ref = v +} + +func (x *EnumSchema) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *EnumSchema) HasExternalDocs() bool { + if x == nil { + return false + } + return x.ExternalDocs != nil +} + +func (x *EnumSchema) ClearExternalDocs() { + x.ExternalDocs = nil +} + +type EnumSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A short description of the schema. + Description string + Default string + // The title of the schema. + Title string + Required bool + ReadOnly bool + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation + Example string + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // + // `ref: ".google.protobuf.Timestamp"`. 
+	Ref string
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 EnumSchema_builder) Build() *EnumSchema {
+	m0 := &EnumSchema{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Description = b.Description
+	x.Default = b.Default
+	x.Title = b.Title
+	x.Required = b.Required
+	x.ReadOnly = b.ReadOnly
+	x.ExternalDocs = b.ExternalDocs
+	x.Example = b.Example
+	x.Ref = b.Ref
+	x.Extensions = b.Extensions
+	return m0
+}
+
+// `JSONSchema` represents properties from JSON Schema taken, and as used, in
+// the OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+//	message SimpleMessage {
+//		option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+//			json_schema: {
+//				title: "SimpleMessage"
+//				description: "A simple message."
+//				required: ["id"]
+//			}
+//		};
+//
+//		// Id represents the message identifier.
+//		string id = 1 [
+//			(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+//				description: "The unique identifier of the simple message."
+//			}];
+//	}
+type JSONSchema struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Ref is used to define an external reference to include in the message.
+	// This could be a fully qualified proto message reference, and that type must
+	// be imported into the protofile. If no message is identified, the Ref will
+	// be used verbatim in the output.
+	// For example:
+	//
+	//	`ref: ".google.protobuf.Timestamp"`.
+	Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
+	// The title of the schema.
+	Title string `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"`
+	// A short description of the schema.
+	Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+	Default string `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"`
+	ReadOnly bool `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	// A free-form property to include a JSON example of this field. This is copied
+	// verbatim to the output swagger.json. Quotes must be escaped.
+	// This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+	Example string `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
+	MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	// Maximum represents an inclusive upper limit for a numeric instance. The
+	// value MUST be a number.
+	Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	// Minimum represents an inclusive lower limit for a numeric instance. The
+	// value MUST be a number.
+	Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	MaxLength uint64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	MinLength uint64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	MaxItems uint64 `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	MinItems uint64 `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	UniqueItems bool `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	MaxProperties uint64 `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"`
+	MinProperties uint64 `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"`
+	Required []string `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"`
+	// Items in 'array' must be unique.
+	Array []string `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"`
+	Type []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"`
+	// `Format`
+	Format string `protobuf:"bytes,36,opt,name=format,proto3" json:"format,omitempty"`
+	// Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+	Enum []string `protobuf:"bytes,46,rep,name=enum,proto3" json:"enum,omitempty"`
+	// Additional field level properties used when generating the OpenAPI v2 file.
+	FieldConfiguration *JSONSchema_FieldConfiguration `protobuf:"bytes,1001,opt,name=field_configuration,json=fieldConfiguration,proto3" json:"field_configuration,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *JSONSchema) Reset() { + *x = JSONSchema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *JSONSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JSONSchema) ProtoMessage() {} + +func (x *JSONSchema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *JSONSchema) GetRef() string { + if x != nil { + return x.Ref + } + return "" +} + +func (x *JSONSchema) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *JSONSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *JSONSchema) GetDefault() string { + if x != nil { + return x.Default + } + return "" +} + +func (x *JSONSchema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly + } + return false +} + +func (x *JSONSchema) GetExample() string { + if x != nil { + return x.Example + } + return "" +} + +func (x *JSONSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *JSONSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *JSONSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *JSONSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *JSONSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *JSONSchema) GetMaxLength() uint64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *JSONSchema) GetMinLength() uint64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *JSONSchema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *JSONSchema) GetMaxItems() uint64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *JSONSchema) GetMinItems() uint64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *JSONSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *JSONSchema) GetMaxProperties() uint64 { + if x != nil { + return x.MaxProperties + } + return 0 +} + +func (x *JSONSchema) GetMinProperties() uint64 { + if x != nil { + return x.MinProperties + } + return 0 +} + +func (x *JSONSchema) GetRequired() []string { + if x != nil { + return x.Required + } + return nil +} + +func (x *JSONSchema) GetArray() []string { + if x != nil { + return x.Array + } + return nil +} + +func (x *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes { + if x != nil { + return x.Type + } + return nil +} + +func (x *JSONSchema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *JSONSchema) GetEnum() []string { + if x != nil { + return x.Enum + } + return nil +} + +func (x *JSONSchema) GetFieldConfiguration() 
*JSONSchema_FieldConfiguration { + if x != nil { + return x.FieldConfiguration + } + return nil +} + +func (x *JSONSchema) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *JSONSchema) SetRef(v string) { + x.Ref = v +} + +func (x *JSONSchema) SetTitle(v string) { + x.Title = v +} + +func (x *JSONSchema) SetDescription(v string) { + x.Description = v +} + +func (x *JSONSchema) SetDefault(v string) { + x.Default = v +} + +func (x *JSONSchema) SetReadOnly(v bool) { + x.ReadOnly = v +} + +func (x *JSONSchema) SetExample(v string) { + x.Example = v +} + +func (x *JSONSchema) SetMultipleOf(v float64) { + x.MultipleOf = v +} + +func (x *JSONSchema) SetMaximum(v float64) { + x.Maximum = v +} + +func (x *JSONSchema) SetExclusiveMaximum(v bool) { + x.ExclusiveMaximum = v +} + +func (x *JSONSchema) SetMinimum(v float64) { + x.Minimum = v +} + +func (x *JSONSchema) SetExclusiveMinimum(v bool) { + x.ExclusiveMinimum = v +} + +func (x *JSONSchema) SetMaxLength(v uint64) { + x.MaxLength = v +} + +func (x *JSONSchema) SetMinLength(v uint64) { + x.MinLength = v +} + +func (x *JSONSchema) SetPattern(v string) { + x.Pattern = v +} + +func (x *JSONSchema) SetMaxItems(v uint64) { + x.MaxItems = v +} + +func (x *JSONSchema) SetMinItems(v uint64) { + x.MinItems = v +} + +func (x *JSONSchema) SetUniqueItems(v bool) { + x.UniqueItems = v +} + +func (x *JSONSchema) SetMaxProperties(v uint64) { + x.MaxProperties = v +} + +func (x *JSONSchema) SetMinProperties(v uint64) { + x.MinProperties = v +} + +func (x *JSONSchema) SetRequired(v []string) { + x.Required = v +} + +func (x *JSONSchema) SetArray(v []string) { + x.Array = v +} + +func (x *JSONSchema) SetType(v []JSONSchema_JSONSchemaSimpleTypes) { + x.Type = v +} + +func (x *JSONSchema) SetFormat(v string) { + x.Format = v +} + +func (x *JSONSchema) SetEnum(v []string) { + x.Enum = v +} + +func (x *JSONSchema) SetFieldConfiguration(v *JSONSchema_FieldConfiguration) { + x.FieldConfiguration = v +} + +func (x *JSONSchema) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *JSONSchema) HasFieldConfiguration() bool { + if x == nil { + return false + } + return x.FieldConfiguration != nil +} + +func (x *JSONSchema) ClearFieldConfiguration() { + x.FieldConfiguration = nil +} + +type JSONSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // + // `ref: ".google.protobuf.Timestamp"`. + Ref string + // The title of the schema. + Title string + // A short description of the schema. + Description string + Default string + ReadOnly bool + // A free-form property to include a JSON example of this field. This is copied + // verbatim to the output swagger.json. Quotes must be escaped. + // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject + Example string + MultipleOf float64 + // Maximum represents an inclusive upper limit for a numeric instance. 
The
+	// value MUST be a number.
+	Maximum float64
+	ExclusiveMaximum bool
+	// Minimum represents an inclusive lower limit for a numeric instance. The
+	// value MUST be a number.
+	Minimum float64
+	ExclusiveMinimum bool
+	MaxLength uint64
+	MinLength uint64
+	Pattern string
+	MaxItems uint64
+	MinItems uint64
+	UniqueItems bool
+	MaxProperties uint64
+	MinProperties uint64
+	Required []string
+	// Items in 'array' must be unique.
+	Array []string
+	Type []JSONSchema_JSONSchemaSimpleTypes
+	// `Format`
+	Format string
+	// Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+	Enum []string
+	// Additional field level properties used when generating the OpenAPI v2 file.
+	FieldConfiguration *JSONSchema_FieldConfiguration
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 JSONSchema_builder) Build() *JSONSchema {
+	m0 := &JSONSchema{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Ref = b.Ref
+	x.Title = b.Title
+	x.Description = b.Description
+	x.Default = b.Default
+	x.ReadOnly = b.ReadOnly
+	x.Example = b.Example
+	x.MultipleOf = b.MultipleOf
+	x.Maximum = b.Maximum
+	x.ExclusiveMaximum = b.ExclusiveMaximum
+	x.Minimum = b.Minimum
+	x.ExclusiveMinimum = b.ExclusiveMinimum
+	x.MaxLength = b.MaxLength
+	x.MinLength = b.MinLength
+	x.Pattern = b.Pattern
+	x.MaxItems = b.MaxItems
+	x.MinItems = b.MinItems
+	x.UniqueItems = b.UniqueItems
+	x.MaxProperties = b.MaxProperties
+	x.MinProperties = b.MinProperties
+	x.Required = b.Required
+	x.Array = b.Array
+	x.Type = b.Type
+	x.Format = b.Format
+	x.Enum = b.Enum
+	x.FieldConfiguration = b.FieldConfiguration
+	x.Extensions = b.Extensions
+	return m0
+}
+
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+type Tag struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// The name of the tag. Use it to allow override of the name of a
+	// global Tag object, then use that name to reference the tag throughout the
+	// OpenAPI file.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// A short description for the tag. GFM syntax can be used for rich text
+	// representation.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// Additional external documentation for this tag.
+	ExternalDocs *ExternalDocumentation `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Tag) Reset() { + *x = Tag{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Tag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tag) ProtoMessage() {} + +func (x *Tag) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Tag) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Tag) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Tag) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Tag) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Tag) SetName(v string) { + x.Name = v +} + +func (x *Tag) SetDescription(v string) { + x.Description = v +} + +func (x *Tag) SetExternalDocs(v *ExternalDocumentation) { + x.ExternalDocs = v +} + +func (x *Tag) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *Tag) HasExternalDocs() bool { + if x == nil { + return false + } + return x.ExternalDocs != nil +} + +func (x *Tag) ClearExternalDocs() { + x.ExternalDocs = nil +} + +type Tag_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The name of the tag. Use it to allow override of the name of a + // global Tag object, then use that name to reference the tag throughout the + // OpenAPI file. + Name string + // A short description for the tag. GFM syntax can be used for rich text + // representation. + Description string + // Additional external documentation for this tag. + ExternalDocs *ExternalDocumentation + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Tag_builder) Build() *Tag { + m0 := &Tag{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Description = b.Description + x.ExternalDocs = b.ExternalDocs + x.Extensions = b.Extensions + return m0 +} + +// `SecurityDefinitions` is a representation of OpenAPI v2 specification's +// Security Definitions object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject +// +// A declaration of the security schemes available to be used in the +// specification. This does not enforce the security schemes on the operations +// and only serves to provide the relevant details for each scheme. +type SecurityDefinitions struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // A single security scheme definition, mapping a "name" to the scheme it + // defines. 
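+	//
+	// Illustrative sketch, not from the upstream .proto comments: using the
+	// builder types generated in this file (and assuming the
+	// SecurityScheme_TYPE_API_KEY and SecurityScheme_IN_HEADER enum values
+	// declared earlier in this file), such a map might be assembled as:
+	//
+	//	defs := SecurityDefinitions_builder{
+	//		Security: map[string]*SecurityScheme{
+	//			"ApiKeyAuth": SecurityScheme_builder{
+	//				Type: SecurityScheme_TYPE_API_KEY,
+	//				In:   SecurityScheme_IN_HEADER,
+	//				Name: "X-API-Key",
+	//			}.Build(),
+	//		},
+	//	}.Build()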
+ Security map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityDefinitions) Reset() { + *x = SecurityDefinitions{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityDefinitions) ProtoMessage() {} + +func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme { + if x != nil { + return x.Security + } + return nil +} + +func (x *SecurityDefinitions) SetSecurity(v map[string]*SecurityScheme) { + x.Security = v +} + +type SecurityDefinitions_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A single security scheme definition, mapping a "name" to the scheme it + // defines. + Security map[string]*SecurityScheme +} + +func (b0 SecurityDefinitions_builder) Build() *SecurityDefinitions { + m0 := &SecurityDefinitions{} + b, x := &b0, m0 + _, _ = b, x + x.Security = b.Security + return m0 +} + +// `SecurityScheme` is a representation of OpenAPI v2 specification's +// Security Scheme object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject +// +// Allows the definition of a security scheme that can be used by the +// operations. Supported schemes are basic authentication, an API key (either as +// a header or as a query parameter) and OAuth2's common flows (implicit, +// password, application and access code). +type SecurityScheme struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // The type of the security scheme. Valid values are "basic", + // "apiKey" or "oauth2". + Type SecurityScheme_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"` + // A short description for security scheme. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The name of the header or query parameter to be used. + // Valid for apiKey. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // The location of the API key. Valid values are "query" or + // "header". + // Valid for apiKey. + In SecurityScheme_In `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_In" json:"in,omitempty"` + // The flow used by the OAuth2 security scheme. Valid values are + // "implicit", "password", "application" or "accessCode". + // Valid for oauth2. + Flow SecurityScheme_Flow `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Flow" json:"flow,omitempty"` + // The authorization URL to be used for this flow. This SHOULD be in + // the form of a URL. + // Valid for oauth2/implicit and oauth2/accessCode. 
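+	//
+	// Illustrative sketch, not from the upstream .proto comments: for an
+	// oauth2/accessCode scheme both URL fields below would typically be set,
+	// assuming the SecurityScheme_TYPE_OAUTH2 and SecurityScheme_FLOW_ACCESS_CODE
+	// enum values declared earlier in this file:
+	//
+	//	scheme := SecurityScheme_builder{
+	//		Type:             SecurityScheme_TYPE_OAUTH2,
+	//		Flow:             SecurityScheme_FLOW_ACCESS_CODE,
+	//		AuthorizationUrl: "https://example.com/oauth/authorize",
+	//		TokenUrl:         "https://example.com/oauth/token",
+	//	}.Build()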
+ AuthorizationUrl string `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + // The token URL to be used for this flow. This SHOULD be in the + // form of a URL. + // Valid for oauth2/password, oauth2/application and oauth2/accessCode. + TokenUrl string `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + // The available scopes for the OAuth2 security scheme. + // Valid for oauth2. + Scopes *Scopes `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityScheme) Reset() { + *x = SecurityScheme{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityScheme) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityScheme) ProtoMessage() {} + +func (x *SecurityScheme) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityScheme) GetType() SecurityScheme_Type { + if x != nil { + return x.Type + } + return SecurityScheme_TYPE_INVALID +} + +func (x *SecurityScheme) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *SecurityScheme) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SecurityScheme) GetIn() SecurityScheme_In { + if x != nil { + return x.In + } + return SecurityScheme_IN_INVALID +} + +func (x *SecurityScheme) GetFlow() SecurityScheme_Flow { + if x != nil { + return x.Flow + } + return SecurityScheme_FLOW_INVALID +} + +func (x *SecurityScheme) GetAuthorizationUrl() string { + if x != nil { + return x.AuthorizationUrl + } + return "" +} + +func (x *SecurityScheme) GetTokenUrl() string { + if x != nil { + return x.TokenUrl + } + return "" +} + +func (x *SecurityScheme) GetScopes() *Scopes { + if x != nil { + return x.Scopes + } + return nil +} + +func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *SecurityScheme) SetType(v SecurityScheme_Type) { + x.Type = v +} + +func (x *SecurityScheme) SetDescription(v string) { + x.Description = v +} + +func (x *SecurityScheme) SetName(v string) { + x.Name = v +} + +func (x *SecurityScheme) SetIn(v SecurityScheme_In) { + x.In = v +} + +func (x *SecurityScheme) SetFlow(v SecurityScheme_Flow) { + x.Flow = v +} + +func (x *SecurityScheme) SetAuthorizationUrl(v string) { + x.AuthorizationUrl = v +} + +func (x *SecurityScheme) SetTokenUrl(v string) { + x.TokenUrl = v +} + +func (x *SecurityScheme) SetScopes(v *Scopes) { + x.Scopes = v +} + +func (x *SecurityScheme) SetExtensions(v map[string]*structpb.Value) { + x.Extensions = v +} + +func (x *SecurityScheme) HasScopes() bool { 
+ if x == nil { + return false + } + return x.Scopes != nil +} + +func (x *SecurityScheme) ClearScopes() { + x.Scopes = nil +} + +type SecurityScheme_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The type of the security scheme. Valid values are "basic", + // "apiKey" or "oauth2". + Type SecurityScheme_Type + // A short description for security scheme. + Description string + // The name of the header or query parameter to be used. + // Valid for apiKey. + Name string + // The location of the API key. Valid values are "query" or + // "header". + // Valid for apiKey. + In SecurityScheme_In + // The flow used by the OAuth2 security scheme. Valid values are + // "implicit", "password", "application" or "accessCode". + // Valid for oauth2. + Flow SecurityScheme_Flow + // The authorization URL to be used for this flow. This SHOULD be in + // the form of a URL. + // Valid for oauth2/implicit and oauth2/accessCode. + AuthorizationUrl string + // The token URL to be used for this flow. This SHOULD be in the + // form of a URL. + // Valid for oauth2/password, oauth2/application and oauth2/accessCode. + TokenUrl string + // The available scopes for the OAuth2 security scheme. + // Valid for oauth2. + Scopes *Scopes + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 SecurityScheme_builder) Build() *SecurityScheme { + m0 := &SecurityScheme{} + b, x := &b0, m0 + _, _ = b, x + x.Type = b.Type + x.Description = b.Description + x.Name = b.Name + x.In = b.In + x.Flow = b.Flow + x.AuthorizationUrl = b.AuthorizationUrl + x.TokenUrl = b.TokenUrl + x.Scopes = b.Scopes + x.Extensions = b.Extensions + return m0 +} + +// `SecurityRequirement` is a representation of OpenAPI v2 specification's +// Security Requirement object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject +// +// Lists the required security schemes to execute this operation. The object can +// have multiple security schemes declared in it which are all required (that +// is, there is a logical AND between the schemes). +// +// The name used for each property MUST correspond to a security scheme +// declared in the Security Definitions. +type SecurityRequirement struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // Each name must correspond to a security scheme which is declared in + // the Security Definitions. If the security scheme is of type "oauth2", + // then the value is a list of scope names required for the execution. + // For other security scheme types, the array MUST be empty. 
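+	//
+	// Illustrative sketch, not from the upstream .proto comments: requiring an
+	// "OAuth2" scheme with two scopes might look as follows, using the
+	// SecurityRequirement_SecurityRequirementValue_builder defined later in
+	// this file:
+	//
+	//	req := SecurityRequirement_builder{
+	//		SecurityRequirement: map[string]*SecurityRequirement_SecurityRequirementValue{
+	//			"OAuth2": SecurityRequirement_SecurityRequirementValue_builder{
+	//				Scope: []string{"read", "write"},
+	//			}.Build(),
+	//		},
+	//	}.Build()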
+ SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityRequirement) Reset() { + *x = SecurityRequirement{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityRequirement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityRequirement) ProtoMessage() {} + +func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue { + if x != nil { + return x.SecurityRequirement + } + return nil +} + +func (x *SecurityRequirement) SetSecurityRequirement(v map[string]*SecurityRequirement_SecurityRequirementValue) { + x.SecurityRequirement = v +} + +type SecurityRequirement_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Each name must correspond to a security scheme which is declared in + // the Security Definitions. If the security scheme is of type "oauth2", + // then the value is a list of scope names required for the execution. + // For other security scheme types, the array MUST be empty. + SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue +} + +func (b0 SecurityRequirement_builder) Build() *SecurityRequirement { + m0 := &SecurityRequirement{} + b, x := &b0, m0 + _, _ = b, x + x.SecurityRequirement = b.SecurityRequirement + return m0 +} + +// `Scopes` is a representation of OpenAPI v2 specification's Scopes object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject +// +// Lists the available scopes for an OAuth2 security scheme. +type Scopes struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // Maps between a name of a scope to a short description of it (as the value + // of the property). 
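+	//
+	// Illustrative sketch, not from the upstream .proto comments: a scopes map
+	// pairing each scope name with its description might be built with the
+	// Scopes_builder defined below:
+	//
+	//	scopes := Scopes_builder{
+	//		Scope: map[string]string{
+	//			"read":  "Grants read access",
+	//			"write": "Grants write access",
+	//		},
+	//	}.Build()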
+ Scope map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Scopes) Reset() { + *x = Scopes{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Scopes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Scopes) ProtoMessage() {} + +func (x *Scopes) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Scopes) GetScope() map[string]string { + if x != nil { + return x.Scope + } + return nil +} + +func (x *Scopes) SetScope(v map[string]string) { + x.Scope = v +} + +type Scopes_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Maps between a name of a scope to a short description of it (as the value + // of the property). + Scope map[string]string +} + +func (b0 Scopes_builder) Build() *Scopes { + m0 := &Scopes{} + b, x := &b0, m0 + _, _ = b, x + x.Scope = b.Scope + return m0 +} + +// 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file. +// These properties are not defined by OpenAPIv2, but they are used to control the generation. +type JSONSchema_FieldConfiguration struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // Alternative parameter name when used as path parameter. If set, this will + // be used as the complete parameter name when this field is used as a path + // parameter. Use this to avoid having auto generated path parameter names + // for overlapping paths. + PathParamName string `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *JSONSchema_FieldConfiguration) Reset() { + *x = JSONSchema_FieldConfiguration{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *JSONSchema_FieldConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JSONSchema_FieldConfiguration) ProtoMessage() {} + +func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *JSONSchema_FieldConfiguration) GetPathParamName() string { + if x != nil { + return x.PathParamName + } + return "" +} + +func (x *JSONSchema_FieldConfiguration) SetPathParamName(v string) { + x.PathParamName = v +} + +type JSONSchema_FieldConfiguration_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Alternative parameter name when used as path parameter. If set, this will + // be used as the complete parameter name when this field is used as a path + // parameter. 
Use this to avoid having auto generated path parameter names + // for overlapping paths. + PathParamName string +} + +func (b0 JSONSchema_FieldConfiguration_builder) Build() *JSONSchema_FieldConfiguration { + m0 := &JSONSchema_FieldConfiguration{} + b, x := &b0, m0 + _, _ = b, x + x.PathParamName = b.PathParamName + return m0 +} + +// If the security scheme is of type "oauth2", then the value is a list of +// scope names required for the execution. For other security scheme types, +// the array MUST be empty. +type SecurityRequirement_SecurityRequirementValue struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Scope []string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityRequirement_SecurityRequirementValue) Reset() { + *x = SecurityRequirement_SecurityRequirementValue{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityRequirement_SecurityRequirementValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {} + +func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string { + if x != nil { + return x.Scope + } + return nil +} + +func (x *SecurityRequirement_SecurityRequirementValue) SetScope(v []string) { + x.Scope = v +} + +type SecurityRequirement_SecurityRequirementValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Scope []string +} + +func (b0 SecurityRequirement_SecurityRequirementValue_builder) Build() *SecurityRequirement_SecurityRequirementValue { + m0 := &SecurityRequirement_SecurityRequirementValue{} + b, x := &b0, m0 + _, _ = b, x + x.Scope = b.Scope + return m0 +} + +var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor + +var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x08, 0x0a, 0x07, 0x53, 0x77, 0x61, 0x67, + 0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x12, 0x43, 0x0a, + 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, + 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, + 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x71, 0x0a, 0x14, 0x73, 
0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x65, 0x0a, 0x0d, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x6f, 0x63, 0x73, 0x12, 0x62, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xd6, 0x07, + 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, + 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x61, 0x0a, + 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 
0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x5a, 0x0a, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x55, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, + 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x62, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 
0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x0f, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x45, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a, + 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, + 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, + 0x41, 0x4e, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, + 0x22, 0xd8, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, + 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, + 0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, + 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, + 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0x9a, 0x05, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x12, 0x5d, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, + 0x12, 0x63, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 
0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x03, 0x0a, 0x04, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72, + 0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, + 0x74, 0x12, 0x4c, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c, + 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, + 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 
0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x2f, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65, + 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x4b, 0x0a, 0x15, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xaa, 0x02, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x56, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x6a, + 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73, + 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, 0x0d, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x22, 0xe8, 0x03, 0x0a, 0x0a, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 
0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, + 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, + 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7, + 0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a, + 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 
0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, + 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, + 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x18, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x69, 0x6e, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18, + 0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x12, 0x5f, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70, + 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x2e, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, 0x0a, 0x13, 0x66, 0x69, 0x65, + 0x6c, 0x64, 
0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, + 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x12, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74, + 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, + 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x52, 0x41, 0x59, + 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, 0x41, 0x4e, 0x10, 0x02, 0x12, + 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, + 0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, + 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, + 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x4a, 0x04, + 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e, + 0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a, + 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, 0x0a, 0x03, 0x54, 0x61, 0x67, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x5e, 0x0a, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, + 0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, + 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, + 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x3a, 0x02, 0x38, 0x01, 0x22, 0xff, + 0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x02, 0x69, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x6c, 0x6f, + 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a, + 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 
0x55, 0x0a, + 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0e, + 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x10, + 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, 0x54, 0x48, 0x32, 0x10, + 0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x5f, 0x49, 0x4e, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x5f, 0x51, 0x55, + 0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44, + 0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x0c, + 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x11, + 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f, + 0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x50, 0x50, + 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, + 0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x04, + 0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, 0x0a, 0x14, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 
0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x06, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, + 0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x02, 0x12, 0x06, + 0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x53, 0x53, 0x10, 0x04, 0x42, + 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 35) +var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{ + (Scheme)(0), // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme + (HeaderParameter_Type)(0), // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type + (JSONSchema_JSONSchemaSimpleTypes)(0), // 2: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes + (SecurityScheme_Type)(0), // 3: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type + (SecurityScheme_In)(0), // 4: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In + (SecurityScheme_Flow)(0), // 5: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow + (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger + (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation + 
(*Parameters)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Parameters + (*HeaderParameter)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter + (*Header)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Header + (*Response)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.Response + (*Info)(nil), // 12: grpc.gateway.protoc_gen_openapiv2.options.Info + (*Contact)(nil), // 13: grpc.gateway.protoc_gen_openapiv2.options.Contact + (*License)(nil), // 14: grpc.gateway.protoc_gen_openapiv2.options.License + (*ExternalDocumentation)(nil), // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + (*Schema)(nil), // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema + (*EnumSchema)(nil), // 17: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + (*JSONSchema)(nil), // 18: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + (*Tag)(nil), // 19: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*SecurityDefinitions)(nil), // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions + (*SecurityScheme)(nil), // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme + (*SecurityRequirement)(nil), // 22: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + (*Scopes)(nil), // 23: grpc.gateway.protoc_gen_openapiv2.options.Scopes + nil, // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry + nil, // 25: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry + nil, // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry + nil, // 27: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry + nil, // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry + nil, // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry + nil, // 30: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry + nil, // 31: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry + nil, // 32: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry + (*JSONSchema_FieldConfiguration)(nil), // 33: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration + nil, // 34: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry + nil, // 35: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry + nil, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry + nil, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry + (*SecurityRequirement_SecurityRequirementValue)(nil), // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue + nil, // 39: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + nil, // 40: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + (*structpb.Value)(nil), // 41: google.protobuf.Value +} +var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{ + 12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info + 0, // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme + 24, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry + 20, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions + 22, // 4: 
grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + 19, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag + 15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 25, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry + 15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 26, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry + 0, // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme + 22, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + 27, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry + 8, // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters + 9, // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter + 1, // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type + 16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema + 28, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry + 29, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry + 30, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry + 13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Contact + 14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License + 31, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry + 18, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + 15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 15, // 25: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 32, // 26: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry + 2, // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes + 33, // 28: 
grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration + 34, // 29: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry + 15, // 30: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 35, // 31: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry + 36, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry + 3, // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type + 4, // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In + 5, // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow + 23, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes + 37, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry + 39, // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + 40, // 39: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + 11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response + 41, // 41: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value + 11, // 42: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response + 41, // 43: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value + 10, // 44: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header + 41, // 45: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 46: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 47: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 48: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 49: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value + 21, // 50: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme + 41, // 51: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value + 38, // 52: 
grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue + 53, // [53:53] is the sub-list for method output_type + 53, // [53:53] is the sub-list for method input_type + 53, // [53:53] is the sub-list for extension type_name + 53, // [53:53] is the sub-list for extension extendee + 0, // [0:53] is the sub-list for field type_name +} + +func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() } +func file_protoc_gen_openapiv2_options_openapiv2_proto_init() { + if File_protoc_gen_openapiv2_options_openapiv2_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc, + NumEnums: 6, + NumMessages: 35, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes, + DependencyIndexes: file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs, + EnumInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes, + MessageInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes, + }.Build() + File_protoc_gen_openapiv2_options_openapiv2_proto = out.File + file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = nil + file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = nil + file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto new file mode 100644 index 00000000000..5313f0818ae --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto @@ -0,0 +1,759 @@ +syntax = "proto3"; + +package grpc.gateway.protoc_gen_openapiv2.options; + +import "google/protobuf/struct.proto"; + +option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"; + +// Scheme describes the schemes supported by the OpenAPI Swagger +// and Operation objects. +enum Scheme { + UNKNOWN = 0; + HTTP = 1; + HTTPS = 2; + WS = 3; + WSS = 4; +} + +// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// schemes: HTTPS; +// consumes: "application/json"; +// produces: "application/json"; +// }; +// +message Swagger { + // Specifies the OpenAPI Specification version being used. It can be + // used by the OpenAPI UI and other clients to interpret the API listing. The + // value MUST be "2.0". + string swagger = 1; + // Provides metadata about the API. The metadata can be used by the + // clients if needed. + Info info = 2; + // The host (name or ip) serving the API. This MUST be the host only and does + // not include the scheme nor sub-paths. It MAY include a port. 
If the host is + // not included, the host serving the documentation is to be used (including + // the port). The host does not support path templating. + string host = 3; + // The base path on which the API is served, which is relative to the host. If + // it is not included, the API is served directly under the host. The value + // MUST start with a leading slash (/). The basePath does not support path + // templating. + // Note that using `base_path` does not change the endpoint paths that are + // generated in the resulting OpenAPI file. If you wish to use `base_path` + // with relatively generated OpenAPI paths, the `base_path` prefix must be + // manually removed from your `google.api.http` paths and your code changed to + // serve the API from the `base_path`. + string base_path = 4; + // The transfer protocol of the API. Values MUST be from the list: "http", + // "https", "ws", "wss". If the schemes is not included, the default scheme to + // be used is the one used to access the OpenAPI definition itself. + repeated Scheme schemes = 5; + // A list of MIME types the APIs can consume. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + repeated string consumes = 6; + // A list of MIME types the APIs can produce. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + repeated string produces = 7; + // field 8 is reserved for 'paths'. + reserved 8; + // field 9 is reserved for 'definitions', which at this time are already + // exposed as and customizable as proto messages. + reserved 9; + // An object to hold responses that can be used across operations. This + // property does not define global responses for all operations. + map responses = 10; + // Security scheme definitions that can be used across the specification. + SecurityDefinitions security_definitions = 11; + // A declaration of which security schemes are applied for the API as a whole. + // The list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). + // Individual operations can override this definition. + repeated SecurityRequirement security = 12; + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + repeated Tag tags = 13; + // Additional external documentation. + ExternalDocumentation external_docs = 14; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map extensions = 15; +} + +// `Operation` is a representation of OpenAPI v2 specification's Operation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject +// +// Example: +// +// service EchoService { +// rpc Echo(SimpleMessage) returns (SimpleMessage) { +// option (google.api.http) = { +// get: "/v1/example/echo/{id}" +// }; +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { +// summary: "Get a message."; +// operation_id: "getMessage"; +// tags: "echo"; +// responses: { +// key: "200" +// value: { +// description: "OK"; +// } +// } +// }; +// } +// } +message Operation { + // A list of tags for API documentation control. 
Tags can be used for logical + // grouping of operations by resources or any other qualifier. + repeated string tags = 1; + // A short summary of what the operation does. For maximum readability in the + // swagger-ui, this field SHOULD be less than 120 characters. + string summary = 2; + // A verbose explanation of the operation behavior. GFM syntax can be used for + // rich text representation. + string description = 3; + // Additional external documentation for this operation. + ExternalDocumentation external_docs = 4; + // Unique string used to identify the operation. The id MUST be unique among + // all operations described in the API. Tools and libraries MAY use the + // operationId to uniquely identify an operation, therefore, it is recommended + // to follow common programming naming conventions. + string operation_id = 5; + // A list of MIME types the operation can consume. This overrides the consumes + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + repeated string consumes = 6; + // A list of MIME types the operation can produce. This overrides the produces + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + repeated string produces = 7; + // field 8 is reserved for 'parameters'. + reserved 8; + // The list of possible responses as they are returned from executing this + // operation. + map responses = 9; + // The transfer protocol for the operation. Values MUST be from the list: + // "http", "https", "ws", "wss". The value overrides the OpenAPI Object + // schemes definition. + repeated Scheme schemes = 10; + // Declares this operation to be deprecated. Usage of the declared operation + // should be refrained. Default value is false. + bool deprecated = 11; + // A declaration of which security schemes are applied for this operation. The + // list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). This + // definition overrides any declared top-level security. To remove a top-level + // security declaration, an empty array can be used. + repeated SecurityRequirement security = 12; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map extensions = 13; + // Custom parameters such as HTTP request headers. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/ + // and https://swagger.io/specification/v2/#parameter-object. + Parameters parameters = 14; +} + +// `Parameters` is a representation of OpenAPI v2 specification's parameters object. +// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only +// allow header parameters to be set here since we do not want users specifying custom non-header +// parameters beyond those inferred from the Protobuf schema. +// See: https://swagger.io/specification/v2/#parameter-object +message Parameters { + // `Headers` is one or more HTTP header parameter. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters + repeated HeaderParameter headers = 1; +} + +// `HeaderParameter` a HTTP header parameter. 
+// See: https://swagger.io/specification/v2/#parameter-object +message HeaderParameter { + // `Type` is a supported HTTP header type. + // See https://swagger.io/specification/v2/#parameterType. + enum Type { + UNKNOWN = 0; + STRING = 1; + NUMBER = 2; + INTEGER = 3; + BOOLEAN = 4; + } + + // `Name` is the header name. + string name = 1; + // `Description` is a short description of the header. + string description = 2; + // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + // See: https://swagger.io/specification/v2/#parameterType. + Type type = 3; + // `Format` The extending format for the previously mentioned type. + string format = 4; + // `Required` indicates if the header is optional + bool required = 5; + // field 6 is reserved for 'items', but in OpenAPI-specific way. + reserved 6; + // field 7 is reserved `Collection Format`. Determines the format of the array if type array is used. + reserved 7; +} + +// `Header` is a representation of OpenAPI v2 specification's Header object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject +// +message Header { + // `Description` is a short description of the header. + string description = 1; + // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + string type = 2; + // `Format` The extending format for the previously mentioned type. + string format = 3; + // field 4 is reserved for 'items', but in OpenAPI-specific way. + reserved 4; + // field 5 is reserved `Collection Format` Determines the format of the array if type array is used. + reserved 5; + // `Default` Declares the value of the header that the server will use if none is provided. + // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. + // Unlike JSON Schema this value MUST conform to the defined type for the header. + string default = 6; + // field 7 is reserved for 'maximum'. + reserved 7; + // field 8 is reserved for 'exclusiveMaximum'. + reserved 8; + // field 9 is reserved for 'minimum'. + reserved 9; + // field 10 is reserved for 'exclusiveMinimum'. + reserved 10; + // field 11 is reserved for 'maxLength'. + reserved 11; + // field 12 is reserved for 'minLength'. + reserved 12; + // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. + string pattern = 13; + // field 14 is reserved for 'maxItems'. + reserved 14; + // field 15 is reserved for 'minItems'. + reserved 15; + // field 16 is reserved for 'uniqueItems'. + reserved 16; + // field 17 is reserved for 'enum'. + reserved 17; + // field 18 is reserved for 'multipleOf'. + reserved 18; +} + +// `Response` is a representation of OpenAPI v2 specification's Response object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject +// +message Response { + // `Description` is a short description of the response. + // GFM syntax can be used for rich text representation. + string description = 1; + // `Schema` optionally defines the structure of the response. + // If `Schema` is not provided, it means there is no content to the response. + Schema schema = 2; + // `Headers` A list of headers that are sent with the response. 
+  // `Header` name is expected to be a string in the canonical format of the MIME header key
+  // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+  map<string, Header> headers = 3;
+  // `Examples` gives per-mimetype response examples.
+  // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+  map<string, string> examples = 4;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 5;
+}
+
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    info: {
+//      title: "Echo API";
+//      version: "1.0";
+//      description: "";
+//      contact: {
+//        name: "gRPC-Gateway project";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//        email: "none@example.com";
+//      };
+//      license: {
+//        name: "BSD 3-Clause License";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//      };
+//    };
+//    ...
+//  };
+//
+message Info {
+  // The title of the application.
+  string title = 1;
+  // A short description of the application. GFM syntax can be used for rich
+  // text representation.
+  string description = 2;
+  // The Terms of Service for the API.
+  string terms_of_service = 3;
+  // The contact information for the exposed API.
+  Contact contact = 4;
+  // The license information for the exposed API.
+  License license = 5;
+  // Provides the version of the application API (not to be confused
+  // with the specification version).
+  string version = 6;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 7;
+}
+
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    info: {
+//      ...
+//      contact: {
+//        name: "gRPC-Gateway project";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//        email: "none@example.com";
+//      };
+//      ...
+//    };
+//    ...
+//  };
+//
+message Contact {
+  // The identifying name of the contact person/organization.
+  string name = 1;
+  // The URL pointing to the contact information. MUST be in the format of a
+  // URL.
+  string url = 2;
+  // The email address of the contact person/organization. MUST be in the format
+  // of an email address.
+  string email = 3;
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    info: {
+//      ...
+//      license: {
+//        name: "BSD 3-Clause License";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//      };
+//      ...
+//    };
+//    ...
+//  };
+//
+message License {
+  // The license name used for the API.
+  string name = 1;
+  // A URL to the license used for the API. MUST be in the format of a URL.
+ string url = 2; +} + +// `ExternalDocumentation` is a representation of OpenAPI v2 specification's +// ExternalDocumentation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// ... +// external_docs: { +// description: "More about gRPC-Gateway"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// } +// ... +// }; +// +message ExternalDocumentation { + // A short description of the target documentation. GFM syntax can be used for + // rich text representation. + string description = 1; + // The URL for the target documentation. Value MUST be in the format + // of a URL. + string url = 2; +} + +// `Schema` is a representation of OpenAPI v2 specification's Schema object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +message Schema { + JSONSchema json_schema = 1; + // Adds support for polymorphism. The discriminator is the schema property + // name that is used to differentiate between other schema that inherit this + // schema. The property name used MUST be defined at this schema and it MUST + // be in the required property list. When used, the value MUST be the name of + // this schema or any schema that inherits it. + string discriminator = 2; + // Relevant only for Schema "properties" definitions. Declares the property as + // "read only". This means that it MAY be sent as part of a response but MUST + // NOT be sent as part of the request. Properties marked as readOnly being + // true SHOULD NOT be in the required list of the defined schema. Default + // value is false. + bool read_only = 3; + // field 4 is reserved for 'xml'. + reserved 4; + // Additional external documentation for this schema. + ExternalDocumentation external_docs = 5; + // A free-form property to include an example of an instance for this schema in JSON. + // This is copied verbatim to the output. + string example = 6; +} + +// `EnumSchema` is subset of fields from the OpenAPI v2 specification's Schema object. +// Only fields that are applicable to Enums are included +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = { +// ... +// title: "MyEnum"; +// description:"This is my nice enum"; +// example: "ZERO"; +// required: true; +// ... +// }; +// +message EnumSchema { + // A short description of the schema. + string description = 1; + string default = 2; + // The title of the schema. + string title = 3; + bool required = 4; + bool read_only = 5; + // Additional external documentation for this schema. + ExternalDocumentation external_docs = 6; + string example = 7; + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // `ref: ".google.protobuf.Timestamp"`. + string ref = 8; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 9;
+}
+
+// `JSONSchema` represents properties from JSON Schema taken, and as used, in
+// the OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+//  message SimpleMessage {
+//    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+//      json_schema: {
+//        title: "SimpleMessage"
+//        description: "A simple message."
+//        required: ["id"]
+//      }
+//    };
+//
+//    // Id represents the message identifier.
+//    string id = 1; [
+//        (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+//          description: "The unique identifier of the simple message."
+//        }];
+//  }
+//
+message JSONSchema {
+  // field 1 is reserved for '$id', omitted from OpenAPI v2.
+  reserved 1;
+  // field 2 is reserved for '$schema', omitted from OpenAPI v2.
+  reserved 2;
+  // Ref is used to define an external reference to include in the message.
+  // This could be a fully qualified proto message reference, and that type must
+  // be imported into the protofile. If no message is identified, the Ref will
+  // be used verbatim in the output.
+  // For example:
+  //  `ref: ".google.protobuf.Timestamp"`.
+  string ref = 3;
+  // field 4 is reserved for '$comment', omitted from OpenAPI v2.
+  reserved 4;
+  // The title of the schema.
+  string title = 5;
+  // A short description of the schema.
+  string description = 6;
+  string default = 7;
+  bool read_only = 8;
+  // A free-form property to include a JSON example of this field. This is copied
+  // verbatim to the output swagger.json. Quotes must be escaped.
+  // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+  string example = 9;
+  double multiple_of = 10;
+  // Maximum represents an inclusive upper limit for a numeric instance. The
+  // value MUST be a number.
+  double maximum = 11;
+  bool exclusive_maximum = 12;
+  // minimum represents an inclusive lower limit for a numeric instance. The
+  // value MUST be a number.
+  double minimum = 13;
+  bool exclusive_minimum = 14;
+  uint64 max_length = 15;
+  uint64 min_length = 16;
+  string pattern = 17;
+  // field 18 is reserved for 'additionalItems', omitted from OpenAPI v2.
+  reserved 18;
+  // field 19 is reserved for 'items', but in OpenAPI-specific way.
+  // TODO(ivucica): add 'items'?
+  reserved 19;
+  uint64 max_items = 20;
+  uint64 min_items = 21;
+  bool unique_items = 22;
+  // field 23 is reserved for 'contains', omitted from OpenAPI v2.
+  reserved 23;
+  uint64 max_properties = 24;
+  uint64 min_properties = 25;
+  repeated string required = 26;
+  // field 27 is reserved for 'additionalProperties', but in OpenAPI-specific
+  // way. TODO(ivucica): add 'additionalProperties'?
+  reserved 27;
+  // field 28 is reserved for 'definitions', omitted from OpenAPI v2.
+  reserved 28;
+  // field 29 is reserved for 'properties', but in OpenAPI-specific way.
+  // TODO(ivucica): add 'additionalProperties'?
+  reserved 29;
+  // following fields are reserved, as the properties have been omitted from
+  // OpenAPI v2:
+  // patternProperties, dependencies, propertyNames, const
+  reserved 30 to 33;
+  // Items in 'array' must be unique.
+  repeated string array = 34;
+
+  enum JSONSchemaSimpleTypes {
+    UNKNOWN = 0;
+    ARRAY = 1;
+    BOOLEAN = 2;
+    INTEGER = 3;
+    NULL = 4;
+    NUMBER = 5;
+    OBJECT = 6;
+    STRING = 7;
+  }
+
+  repeated JSONSchemaSimpleTypes type = 35;
+  // `Format`
+  string format = 36;
+  // following fields are reserved, as the properties have been omitted from
+  // OpenAPI v2: contentMediaType, contentEncoding, if, then, else
+  reserved 37 to 41;
+  // field 42 is reserved for 'allOf', but in OpenAPI-specific way.
+  // TODO(ivucica): add 'allOf'?
+  reserved 42;
+  // following fields are reserved, as the properties have been omitted from
+  // OpenAPI v2:
+  // anyOf, oneOf, not
+  reserved 43 to 45;
+  // Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+  repeated string enum = 46;
+
+  // Additional field level properties used when generating the OpenAPI v2 file.
+  FieldConfiguration field_configuration = 1001;
+
+  // 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file.
+  // These properties are not defined by OpenAPIv2, but they are used to control the generation.
+  message FieldConfiguration {
+    // Alternative parameter name when used as path parameter. If set, this will
+    // be used as the complete parameter name when this field is used as a path
+    // parameter. Use this to avoid having auto generated path parameter names
+    // for overlapping paths.
+    string path_param_name = 47;
+  }
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 48;
+}
+
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+//
+message Tag {
+  // The name of the tag. Use it to allow override of the name of a
+  // global Tag object, then use that name to reference the tag throughout the
+  // OpenAPI file.
+  string name = 1;
+  // A short description for the tag. GFM syntax can be used for rich text
+  // representation.
+  string description = 2;
+  // Additional external documentation for this tag.
+  ExternalDocumentation external_docs = 3;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 4;
+}
+
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+message SecurityDefinitions {
+  // A single security scheme definition, mapping a "name" to the scheme it
+  // defines.
+  map<string, SecurityScheme> security = 1;
+}
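+
+// Example (an illustrative sketch only, not part of the upstream file; the
+// scheme name "ApiKeyAuth" and the header name are hypothetical):
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    security_definitions: {
+//      security: {
+//        key: "ApiKeyAuth"
+//        value: {
+//          type: TYPE_API_KEY;
+//          in: IN_HEADER;
+//          name: "X-API-Key";
+//        }
+//      }
+//    }
+//  };
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.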
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+message SecurityScheme {
+  // The type of the security scheme. Valid values are "basic",
+  // "apiKey" or "oauth2".
+  enum Type {
+    TYPE_INVALID = 0;
+    TYPE_BASIC = 1;
+    TYPE_API_KEY = 2;
+    TYPE_OAUTH2 = 3;
+  }
+
+  // The location of the API key. Valid values are "query" or "header".
+  enum In {
+    IN_INVALID = 0;
+    IN_QUERY = 1;
+    IN_HEADER = 2;
+  }
+
+  // The flow used by the OAuth2 security scheme. Valid values are
+  // "implicit", "password", "application" or "accessCode".
+  enum Flow {
+    FLOW_INVALID = 0;
+    FLOW_IMPLICIT = 1;
+    FLOW_PASSWORD = 2;
+    FLOW_APPLICATION = 3;
+    FLOW_ACCESS_CODE = 4;
+  }
+
+  // The type of the security scheme. Valid values are "basic",
+  // "apiKey" or "oauth2".
+  Type type = 1;
+  // A short description for security scheme.
+  string description = 2;
+  // The name of the header or query parameter to be used.
+  // Valid for apiKey.
+  string name = 3;
+  // The location of the API key. Valid values are "query" or
+  // "header".
+  // Valid for apiKey.
+  In in = 4;
+  // The flow used by the OAuth2 security scheme. Valid values are
+  // "implicit", "password", "application" or "accessCode".
+  // Valid for oauth2.
+  Flow flow = 5;
+  // The authorization URL to be used for this flow. This SHOULD be in
+  // the form of a URL.
+  // Valid for oauth2/implicit and oauth2/accessCode.
+  string authorization_url = 6;
+  // The token URL to be used for this flow. This SHOULD be in the
+  // form of a URL.
+  // Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+  string token_url = 7;
+  // The available scopes for the OAuth2 security scheme.
+  // Valid for oauth2.
+  Scopes scopes = 8;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 9;
+}
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+message SecurityRequirement {
+  // If the security scheme is of type "oauth2", then the value is a list of
+  // scope names required for the execution. For other security scheme types,
+  // the array MUST be empty.
+  message SecurityRequirementValue {
+    repeated string scope = 1;
+  }
+  // Each name must correspond to a security scheme which is declared in
+  // the Security Definitions. If the security scheme is of type "oauth2",
+  // then the value is a list of scope names required for the execution.
+  // For other security scheme types, the array MUST be empty.
+  map<string, SecurityRequirementValue> security_requirement = 1;
+}
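+
+// Example (an illustrative sketch only, not part of the upstream file; the
+// scheme name matches the hypothetical "ApiKeyAuth" definition shown above):
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    security: {
+//      security_requirement: {
+//        key: "ApiKeyAuth"
+//        value: {}
+//      }
+//    }
+//  };
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.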
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+message Scopes {
+  // Maps between a name of a scope to a short description of it (as the value
+  // of the property).
+  map<string, string> scope = 1;
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go
new file mode 100644
index 00000000000..1f0e0c26916
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go
@@ -0,0 +1,4055 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.36.0
+// 	protoc        (unknown)
+// source: protoc-gen-openapiv2/options/openapiv2.proto
+
+//go:build protoopaque
+
+package options
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	reflect "reflect"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Scheme describes the schemes supported by the OpenAPI Swagger
+// and Operation objects.
+type Scheme int32
+
+const (
+	Scheme_UNKNOWN Scheme = 0
+	Scheme_HTTP    Scheme = 1
+	Scheme_HTTPS   Scheme = 2
+	Scheme_WS      Scheme = 3
+	Scheme_WSS     Scheme = 4
+)
+
+// Enum value maps for Scheme.
+var (
+	Scheme_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "HTTP",
+		2: "HTTPS",
+		3: "WS",
+		4: "WSS",
+	}
+	Scheme_value = map[string]int32{
+		"UNKNOWN": 0,
+		"HTTP":    1,
+		"HTTPS":   2,
+		"WS":      3,
+		"WSS":     4,
+	}
+)
+
+func (x Scheme) Enum() *Scheme {
+	p := new(Scheme)
+	*p = x
+	return p
+}
+
+func (x Scheme) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Scheme) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0].Descriptor()
+}
+
+func (Scheme) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0]
+}
+
+func (x Scheme) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// `Type` is a supported HTTP header type.
+// See https://swagger.io/specification/v2/#parameterType.
+type HeaderParameter_Type int32
+
+const (
+	HeaderParameter_UNKNOWN HeaderParameter_Type = 0
+	HeaderParameter_STRING  HeaderParameter_Type = 1
+	HeaderParameter_NUMBER  HeaderParameter_Type = 2
+	HeaderParameter_INTEGER HeaderParameter_Type = 3
+	HeaderParameter_BOOLEAN HeaderParameter_Type = 4
+)
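+
+// Illustrative sketch (not part of the generated code): the *_name/*_value
+// maps defined below convert between enum numbers and their proto names. The
+// helper name headerTypeFromName is hypothetical.
+func headerTypeFromName(name string) (HeaderParameter_Type, bool) {
+	v, ok := HeaderParameter_Type_value[name]
+	return HeaderParameter_Type(v), ok
+}
+
+// Enum value maps for HeaderParameter_Type.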
+var ( + HeaderParameter_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STRING", + 2: "NUMBER", + 3: "INTEGER", + 4: "BOOLEAN", + } + HeaderParameter_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STRING": 1, + "NUMBER": 2, + "INTEGER": 3, + "BOOLEAN": 4, + } +) + +func (x HeaderParameter_Type) Enum() *HeaderParameter_Type { + p := new(HeaderParameter_Type) + *p = x + return p +} + +func (x HeaderParameter_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HeaderParameter_Type) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1].Descriptor() +} + +func (HeaderParameter_Type) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1] +} + +func (x HeaderParameter_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type JSONSchema_JSONSchemaSimpleTypes int32 + +const ( + JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0 + JSONSchema_ARRAY JSONSchema_JSONSchemaSimpleTypes = 1 + JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2 + JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3 + JSONSchema_NULL JSONSchema_JSONSchemaSimpleTypes = 4 + JSONSchema_NUMBER JSONSchema_JSONSchemaSimpleTypes = 5 + JSONSchema_OBJECT JSONSchema_JSONSchemaSimpleTypes = 6 + JSONSchema_STRING JSONSchema_JSONSchemaSimpleTypes = 7 +) + +// Enum value maps for JSONSchema_JSONSchemaSimpleTypes. +var ( + JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ARRAY", + 2: "BOOLEAN", + 3: "INTEGER", + 4: "NULL", + 5: "NUMBER", + 6: "OBJECT", + 7: "STRING", + } + JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{ + "UNKNOWN": 0, + "ARRAY": 1, + "BOOLEAN": 2, + "INTEGER": 3, + "NULL": 4, + "NUMBER": 5, + "OBJECT": 6, + "STRING": 7, + } +) + +func (x JSONSchema_JSONSchemaSimpleTypes) Enum() *JSONSchema_JSONSchemaSimpleTypes { + p := new(JSONSchema_JSONSchemaSimpleTypes) + *p = x + return p +} + +func (x JSONSchema_JSONSchemaSimpleTypes) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (JSONSchema_JSONSchemaSimpleTypes) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2].Descriptor() +} + +func (JSONSchema_JSONSchemaSimpleTypes) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2] +} + +func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The type of the security scheme. Valid values are "basic", +// "apiKey" or "oauth2". +type SecurityScheme_Type int32 + +const ( + SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0 + SecurityScheme_TYPE_BASIC SecurityScheme_Type = 1 + SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2 + SecurityScheme_TYPE_OAUTH2 SecurityScheme_Type = 3 +) + +// Enum value maps for SecurityScheme_Type. 
+var ( + SecurityScheme_Type_name = map[int32]string{ + 0: "TYPE_INVALID", + 1: "TYPE_BASIC", + 2: "TYPE_API_KEY", + 3: "TYPE_OAUTH2", + } + SecurityScheme_Type_value = map[string]int32{ + "TYPE_INVALID": 0, + "TYPE_BASIC": 1, + "TYPE_API_KEY": 2, + "TYPE_OAUTH2": 3, + } +) + +func (x SecurityScheme_Type) Enum() *SecurityScheme_Type { + p := new(SecurityScheme_Type) + *p = x + return p +} + +func (x SecurityScheme_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_Type) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3].Descriptor() +} + +func (SecurityScheme_Type) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3] +} + +func (x SecurityScheme_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The location of the API key. Valid values are "query" or "header". +type SecurityScheme_In int32 + +const ( + SecurityScheme_IN_INVALID SecurityScheme_In = 0 + SecurityScheme_IN_QUERY SecurityScheme_In = 1 + SecurityScheme_IN_HEADER SecurityScheme_In = 2 +) + +// Enum value maps for SecurityScheme_In. +var ( + SecurityScheme_In_name = map[int32]string{ + 0: "IN_INVALID", + 1: "IN_QUERY", + 2: "IN_HEADER", + } + SecurityScheme_In_value = map[string]int32{ + "IN_INVALID": 0, + "IN_QUERY": 1, + "IN_HEADER": 2, + } +) + +func (x SecurityScheme_In) Enum() *SecurityScheme_In { + p := new(SecurityScheme_In) + *p = x + return p +} + +func (x SecurityScheme_In) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_In) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4].Descriptor() +} + +func (SecurityScheme_In) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4] +} + +func (x SecurityScheme_In) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// The flow used by the OAuth2 security scheme. Valid values are +// "implicit", "password", "application" or "accessCode". +type SecurityScheme_Flow int32 + +const ( + SecurityScheme_FLOW_INVALID SecurityScheme_Flow = 0 + SecurityScheme_FLOW_IMPLICIT SecurityScheme_Flow = 1 + SecurityScheme_FLOW_PASSWORD SecurityScheme_Flow = 2 + SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3 + SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4 +) + +// Enum value maps for SecurityScheme_Flow. 
+var ( + SecurityScheme_Flow_name = map[int32]string{ + 0: "FLOW_INVALID", + 1: "FLOW_IMPLICIT", + 2: "FLOW_PASSWORD", + 3: "FLOW_APPLICATION", + 4: "FLOW_ACCESS_CODE", + } + SecurityScheme_Flow_value = map[string]int32{ + "FLOW_INVALID": 0, + "FLOW_IMPLICIT": 1, + "FLOW_PASSWORD": 2, + "FLOW_APPLICATION": 3, + "FLOW_ACCESS_CODE": 4, + } +) + +func (x SecurityScheme_Flow) Enum() *SecurityScheme_Flow { + p := new(SecurityScheme_Flow) + *p = x + return p +} + +func (x SecurityScheme_Flow) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_Flow) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5].Descriptor() +} + +func (SecurityScheme_Flow) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5] +} + +func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// schemes: HTTPS; +// consumes: "application/json"; +// produces: "application/json"; +// }; +type Swagger struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` + xxx_hidden_Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + xxx_hidden_Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` + xxx_hidden_BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` + xxx_hidden_Schemes []Scheme `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"` + xxx_hidden_Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` + xxx_hidden_Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + xxx_hidden_Responses map[string]*Response `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` + xxx_hidden_Security *[]*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + xxx_hidden_Tags *[]*Tag `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache 
protoimpl.SizeCache +} + +func (x *Swagger) Reset() { + *x = Swagger{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Swagger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Swagger) ProtoMessage() {} + +func (x *Swagger) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Swagger) GetSwagger() string { + if x != nil { + return x.xxx_hidden_Swagger + } + return "" +} + +func (x *Swagger) GetInfo() *Info { + if x != nil { + return x.xxx_hidden_Info + } + return nil +} + +func (x *Swagger) GetHost() string { + if x != nil { + return x.xxx_hidden_Host + } + return "" +} + +func (x *Swagger) GetBasePath() string { + if x != nil { + return x.xxx_hidden_BasePath + } + return "" +} + +func (x *Swagger) GetSchemes() []Scheme { + if x != nil { + return x.xxx_hidden_Schemes + } + return nil +} + +func (x *Swagger) GetConsumes() []string { + if x != nil { + return x.xxx_hidden_Consumes + } + return nil +} + +func (x *Swagger) GetProduces() []string { + if x != nil { + return x.xxx_hidden_Produces + } + return nil +} + +func (x *Swagger) GetResponses() map[string]*Response { + if x != nil { + return x.xxx_hidden_Responses + } + return nil +} + +func (x *Swagger) GetSecurityDefinitions() *SecurityDefinitions { + if x != nil { + return x.xxx_hidden_SecurityDefinitions + } + return nil +} + +func (x *Swagger) GetSecurity() []*SecurityRequirement { + if x != nil { + if x.xxx_hidden_Security != nil { + return *x.xxx_hidden_Security + } + } + return nil +} + +func (x *Swagger) GetTags() []*Tag { + if x != nil { + if x.xxx_hidden_Tags != nil { + return *x.xxx_hidden_Tags + } + } + return nil +} + +func (x *Swagger) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *Swagger) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Swagger) SetSwagger(v string) { + x.xxx_hidden_Swagger = v +} + +func (x *Swagger) SetInfo(v *Info) { + x.xxx_hidden_Info = v +} + +func (x *Swagger) SetHost(v string) { + x.xxx_hidden_Host = v +} + +func (x *Swagger) SetBasePath(v string) { + x.xxx_hidden_BasePath = v +} + +func (x *Swagger) SetSchemes(v []Scheme) { + x.xxx_hidden_Schemes = v +} + +func (x *Swagger) SetConsumes(v []string) { + x.xxx_hidden_Consumes = v +} + +func (x *Swagger) SetProduces(v []string) { + x.xxx_hidden_Produces = v +} + +func (x *Swagger) SetResponses(v map[string]*Response) { + x.xxx_hidden_Responses = v +} + +func (x *Swagger) SetSecurityDefinitions(v *SecurityDefinitions) { + x.xxx_hidden_SecurityDefinitions = v +} + +func (x *Swagger) SetSecurity(v []*SecurityRequirement) { + x.xxx_hidden_Security = &v +} + +func (x *Swagger) SetTags(v []*Tag) { + x.xxx_hidden_Tags = &v +} + +func (x *Swagger) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *Swagger) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Swagger) HasInfo() bool { + if x == nil { + return false + } + return x.xxx_hidden_Info != nil +} + +func (x *Swagger) HasSecurityDefinitions() bool { + if x == nil { + return false + } + 
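+	// Presence of a message-typed field in the opaque API is tracked by the
+	// hidden pointer: non-nil means the field was explicitly set.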
return x.xxx_hidden_SecurityDefinitions != nil +} + +func (x *Swagger) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *Swagger) ClearInfo() { + x.xxx_hidden_Info = nil +} + +func (x *Swagger) ClearSecurityDefinitions() { + x.xxx_hidden_SecurityDefinitions = nil +} + +func (x *Swagger) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +type Swagger_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Specifies the OpenAPI Specification version being used. It can be + // used by the OpenAPI UI and other clients to interpret the API listing. The + // value MUST be "2.0". + Swagger string + // Provides metadata about the API. The metadata can be used by the + // clients if needed. + Info *Info + // The host (name or ip) serving the API. This MUST be the host only and does + // not include the scheme nor sub-paths. It MAY include a port. If the host is + // not included, the host serving the documentation is to be used (including + // the port). The host does not support path templating. + Host string + // The base path on which the API is served, which is relative to the host. If + // it is not included, the API is served directly under the host. The value + // MUST start with a leading slash (/). The basePath does not support path + // templating. + // Note that using `base_path` does not change the endpoint paths that are + // generated in the resulting OpenAPI file. If you wish to use `base_path` + // with relatively generated OpenAPI paths, the `base_path` prefix must be + // manually removed from your `google.api.http` paths and your code changed to + // serve the API from the `base_path`. + BasePath string + // The transfer protocol of the API. Values MUST be from the list: "http", + // "https", "ws", "wss". If the schemes is not included, the default scheme to + // be used is the one used to access the OpenAPI definition itself. + Schemes []Scheme + // A list of MIME types the APIs can consume. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Consumes []string + // A list of MIME types the APIs can produce. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Produces []string + // An object to hold responses that can be used across operations. This + // property does not define global responses for all operations. + Responses map[string]*Response + // Security scheme definitions that can be used across the specification. + SecurityDefinitions *SecurityDefinitions + // A declaration of which security schemes are applied for the API as a whole. + // The list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). + // Individual operations can override this definition. + Security []*SecurityRequirement + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []*Tag + // Additional external documentation. + ExternalDocs *ExternalDocumentation + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Swagger_builder) Build() *Swagger { + m0 := &Swagger{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Swagger = b.Swagger + x.xxx_hidden_Info = b.Info + x.xxx_hidden_Host = b.Host + x.xxx_hidden_BasePath = b.BasePath + x.xxx_hidden_Schemes = b.Schemes + x.xxx_hidden_Consumes = b.Consumes + x.xxx_hidden_Produces = b.Produces + x.xxx_hidden_Responses = b.Responses + x.xxx_hidden_SecurityDefinitions = b.SecurityDefinitions + x.xxx_hidden_Security = &b.Security + x.xxx_hidden_Tags = &b.Tags + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `Operation` is a representation of OpenAPI v2 specification's Operation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject +// +// Example: +// +// service EchoService { +// rpc Echo(SimpleMessage) returns (SimpleMessage) { +// option (google.api.http) = { +// get: "/v1/example/echo/{id}" +// }; +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { +// summary: "Get a message."; +// operation_id: "getMessage"; +// tags: "echo"; +// responses: { +// key: "200" +// value: { +// description: "OK"; +// } +// } +// }; +// } +// } +type Operation struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` + xxx_hidden_Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` + xxx_hidden_Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` + xxx_hidden_Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + xxx_hidden_Responses map[string]*Response `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Schemes []Scheme `protobuf:"varint,10,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"` + xxx_hidden_Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + xxx_hidden_Security *[]*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Parameters *Parameters `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Operation) Reset() { + *x = Operation{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Operation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Operation) ProtoMessage() {} + +func (x *Operation) ProtoReflect() 
protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Operation) GetTags() []string { + if x != nil { + return x.xxx_hidden_Tags + } + return nil +} + +func (x *Operation) GetSummary() string { + if x != nil { + return x.xxx_hidden_Summary + } + return "" +} + +func (x *Operation) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Operation) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *Operation) GetOperationId() string { + if x != nil { + return x.xxx_hidden_OperationId + } + return "" +} + +func (x *Operation) GetConsumes() []string { + if x != nil { + return x.xxx_hidden_Consumes + } + return nil +} + +func (x *Operation) GetProduces() []string { + if x != nil { + return x.xxx_hidden_Produces + } + return nil +} + +func (x *Operation) GetResponses() map[string]*Response { + if x != nil { + return x.xxx_hidden_Responses + } + return nil +} + +func (x *Operation) GetSchemes() []Scheme { + if x != nil { + return x.xxx_hidden_Schemes + } + return nil +} + +func (x *Operation) GetDeprecated() bool { + if x != nil { + return x.xxx_hidden_Deprecated + } + return false +} + +func (x *Operation) GetSecurity() []*SecurityRequirement { + if x != nil { + if x.xxx_hidden_Security != nil { + return *x.xxx_hidden_Security + } + } + return nil +} + +func (x *Operation) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Operation) GetParameters() *Parameters { + if x != nil { + return x.xxx_hidden_Parameters + } + return nil +} + +func (x *Operation) SetTags(v []string) { + x.xxx_hidden_Tags = v +} + +func (x *Operation) SetSummary(v string) { + x.xxx_hidden_Summary = v +} + +func (x *Operation) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Operation) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *Operation) SetOperationId(v string) { + x.xxx_hidden_OperationId = v +} + +func (x *Operation) SetConsumes(v []string) { + x.xxx_hidden_Consumes = v +} + +func (x *Operation) SetProduces(v []string) { + x.xxx_hidden_Produces = v +} + +func (x *Operation) SetResponses(v map[string]*Response) { + x.xxx_hidden_Responses = v +} + +func (x *Operation) SetSchemes(v []Scheme) { + x.xxx_hidden_Schemes = v +} + +func (x *Operation) SetDeprecated(v bool) { + x.xxx_hidden_Deprecated = v +} + +func (x *Operation) SetSecurity(v []*SecurityRequirement) { + x.xxx_hidden_Security = &v +} + +func (x *Operation) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Operation) SetParameters(v *Parameters) { + x.xxx_hidden_Parameters = v +} + +func (x *Operation) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *Operation) HasParameters() bool { + if x == nil { + return false + } + return x.xxx_hidden_Parameters != nil +} + +func (x *Operation) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +func (x *Operation) ClearParameters() { + x.xxx_hidden_Parameters = nil +} + +type Operation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []string + // A short summary of what the operation does. For maximum readability in the + // swagger-ui, this field SHOULD be less than 120 characters. + Summary string + // A verbose explanation of the operation behavior. GFM syntax can be used for + // rich text representation. + Description string + // Additional external documentation for this operation. + ExternalDocs *ExternalDocumentation + // Unique string used to identify the operation. The id MUST be unique among + // all operations described in the API. Tools and libraries MAY use the + // operationId to uniquely identify an operation, therefore, it is recommended + // to follow common programming naming conventions. + OperationId string + // A list of MIME types the operation can consume. This overrides the consumes + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Consumes []string + // A list of MIME types the operation can produce. This overrides the produces + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + Produces []string + // The list of possible responses as they are returned from executing this + // operation. + Responses map[string]*Response + // The transfer protocol for the operation. Values MUST be from the list: + // "http", "https", "ws", "wss". The value overrides the OpenAPI Object + // schemes definition. + Schemes []Scheme + // Declares this operation to be deprecated. Usage of the declared operation + // should be refrained. Default value is false. + Deprecated bool + // A declaration of which security schemes are applied for this operation. The + // list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). This + // definition overrides any declared top-level security. To remove a top-level + // security declaration, an empty array can be used. + Security []*SecurityRequirement + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value + // Custom parameters such as HTTP request headers. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/ + // and https://swagger.io/specification/v2/#parameter-object. + Parameters *Parameters +} + +func (b0 Operation_builder) Build() *Operation { + m0 := &Operation{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Tags = b.Tags + x.xxx_hidden_Summary = b.Summary + x.xxx_hidden_Description = b.Description + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_OperationId = b.OperationId + x.xxx_hidden_Consumes = b.Consumes + x.xxx_hidden_Produces = b.Produces + x.xxx_hidden_Responses = b.Responses + x.xxx_hidden_Schemes = b.Schemes + x.xxx_hidden_Deprecated = b.Deprecated + x.xxx_hidden_Security = &b.Security + x.xxx_hidden_Extensions = b.Extensions + x.xxx_hidden_Parameters = b.Parameters + return m0 +} + +// `Parameters` is a representation of OpenAPI v2 specification's parameters object. 
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only +// allow header parameters to be set here since we do not want users specifying custom non-header +// parameters beyond those inferred from the Protobuf schema. +// See: https://swagger.io/specification/v2/#parameter-object +type Parameters struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Headers *[]*HeaderParameter `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Parameters) Reset() { + *x = Parameters{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Parameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Parameters) ProtoMessage() {} + +func (x *Parameters) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Parameters) GetHeaders() []*HeaderParameter { + if x != nil { + if x.xxx_hidden_Headers != nil { + return *x.xxx_hidden_Headers + } + } + return nil +} + +func (x *Parameters) SetHeaders(v []*HeaderParameter) { + x.xxx_hidden_Headers = &v +} + +type Parameters_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Headers` is one or more HTTP header parameter. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters + Headers []*HeaderParameter +} + +func (b0 Parameters_builder) Build() *Parameters { + m0 := &Parameters{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Headers = &b.Headers + return m0 +} + +// `HeaderParameter` a HTTP header parameter. 
+// See: https://swagger.io/specification/v2/#parameter-object +type HeaderParameter struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Type HeaderParameter_Type `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter_Type" json:"type,omitempty"` + xxx_hidden_Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"` + xxx_hidden_Required bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HeaderParameter) Reset() { + *x = HeaderParameter{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HeaderParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderParameter) ProtoMessage() {} + +func (x *HeaderParameter) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *HeaderParameter) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *HeaderParameter) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *HeaderParameter) GetType() HeaderParameter_Type { + if x != nil { + return x.xxx_hidden_Type + } + return HeaderParameter_UNKNOWN +} + +func (x *HeaderParameter) GetFormat() string { + if x != nil { + return x.xxx_hidden_Format + } + return "" +} + +func (x *HeaderParameter) GetRequired() bool { + if x != nil { + return x.xxx_hidden_Required + } + return false +} + +func (x *HeaderParameter) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *HeaderParameter) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *HeaderParameter) SetType(v HeaderParameter_Type) { + x.xxx_hidden_Type = v +} + +func (x *HeaderParameter) SetFormat(v string) { + x.xxx_hidden_Format = v +} + +func (x *HeaderParameter) SetRequired(v bool) { + x.xxx_hidden_Required = v +} + +type HeaderParameter_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Name` is the header name. + Name string + // `Description` is a short description of the header. + Description string + // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + // See: https://swagger.io/specification/v2/#parameterType. + Type HeaderParameter_Type + // `Format` The extending format for the previously mentioned type. + Format string + // `Required` indicates if the header is optional + Required bool +} + +func (b0 HeaderParameter_builder) Build() *HeaderParameter { + m0 := &HeaderParameter{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Format = b.Format + x.xxx_hidden_Required = b.Required + return m0 +} + +// `Header` is a representation of OpenAPI v2 specification's Header object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject +type Header struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + xxx_hidden_Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"` + xxx_hidden_Default string `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"` + xxx_hidden_Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Header) Reset() { + *x = Header{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Header) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Header) GetType() string { + if x != nil { + return x.xxx_hidden_Type + } + return "" +} + +func (x *Header) GetFormat() string { + if x != nil { + return x.xxx_hidden_Format + } + return "" +} + +func (x *Header) GetDefault() string { + if x != nil { + return x.xxx_hidden_Default + } + return "" +} + +func (x *Header) GetPattern() string { + if x != nil { + return x.xxx_hidden_Pattern + } + return "" +} + +func (x *Header) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Header) SetType(v string) { + x.xxx_hidden_Type = v +} + +func (x *Header) SetFormat(v string) { + x.xxx_hidden_Format = v +} + +func (x *Header) SetDefault(v string) { + x.xxx_hidden_Default = v +} + +func (x *Header) SetPattern(v string) { + x.xxx_hidden_Pattern = v +} + +type Header_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Description` is a short description of the header. + Description string + // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + Type string + // `Format` The extending format for the previously mentioned type. + Format string + // `Default` Declares the value of the header that the server will use if none is provided. + // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. + // Unlike JSON Schema this value MUST conform to the defined type for the header. + Default string + // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. + Pattern string +} + +func (b0 Header_builder) Build() *Header { + m0 := &Header{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Format = b.Format + x.xxx_hidden_Default = b.Default + x.xxx_hidden_Pattern = b.Pattern + return m0 +} + +// `Response` is a representation of OpenAPI v2 specification's Response object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject +type Response struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Schema *Schema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` + xxx_hidden_Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Response) Reset() { + *x = Response{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Response) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Response) GetSchema() *Schema { + if x != nil { + return x.xxx_hidden_Schema + } + return nil +} + +func (x *Response) GetHeaders() map[string]*Header { + if x != nil { + return x.xxx_hidden_Headers + } + return nil +} + +func (x *Response) GetExamples() map[string]string { + if x != nil { + return x.xxx_hidden_Examples + } + return nil +} + +func (x *Response) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Response) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Response) SetSchema(v *Schema) { + x.xxx_hidden_Schema = v +} + +func (x *Response) SetHeaders(v map[string]*Header) { + x.xxx_hidden_Headers = v +} + +func (x *Response) SetExamples(v map[string]string) { + x.xxx_hidden_Examples = v +} + +func (x *Response) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Response) HasSchema() bool { + if x == nil { + return false + } + return x.xxx_hidden_Schema != nil +} + +func (x *Response) ClearSchema() { + x.xxx_hidden_Schema = nil +} + +type Response_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // `Description` is a short description of the response. + // GFM syntax can be used for rich text representation. + Description string + // `Schema` optionally defines the structure of the response. + // If `Schema` is not provided, it means there is no content to the response. + Schema *Schema + // `Headers` A list of headers that are sent with the response. 
+ // `Header` name is expected to be a string in the canonical format of the MIME header key + // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey + Headers map[string]*Header + // `Examples` gives per-mimetype response examples. + // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object + Examples map[string]string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Response_builder) Build() *Response { + m0 := &Response{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Schema = b.Schema + x.xxx_hidden_Headers = b.Headers + x.xxx_hidden_Examples = b.Examples + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `Info` is a representation of OpenAPI v2 specification's Info object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// ... +// }; +type Info struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` + xxx_hidden_Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"` + xxx_hidden_License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"` + xxx_hidden_Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Info) Reset() { + *x = Info{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Info) ProtoMessage() {} + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Info) GetTitle() string { + if x != nil { + return x.xxx_hidden_Title + } + return "" +} + +func (x *Info) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Info) GetTermsOfService() string { + if x != nil { + return x.xxx_hidden_TermsOfService + } + return "" +} + 
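+// Illustrative builder usage (a sketch, not generated output; the field values +// are taken from the documentation example on Info above): +// +// info := Info_builder{ +// Title: "Echo API", +// Version: "1.0", +// }.Build() +// +// Build copies the keyed builder fields into the opaque message, so +// info.GetTitle() returns "Echo API". +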
+func (x *Info) GetContact() *Contact { + if x != nil { + return x.xxx_hidden_Contact + } + return nil +} + +func (x *Info) GetLicense() *License { + if x != nil { + return x.xxx_hidden_License + } + return nil +} + +func (x *Info) GetVersion() string { + if x != nil { + return x.xxx_hidden_Version + } + return "" +} + +func (x *Info) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Info) SetTitle(v string) { + x.xxx_hidden_Title = v +} + +func (x *Info) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Info) SetTermsOfService(v string) { + x.xxx_hidden_TermsOfService = v +} + +func (x *Info) SetContact(v *Contact) { + x.xxx_hidden_Contact = v +} + +func (x *Info) SetLicense(v *License) { + x.xxx_hidden_License = v +} + +func (x *Info) SetVersion(v string) { + x.xxx_hidden_Version = v +} + +func (x *Info) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Info) HasContact() bool { + if x == nil { + return false + } + return x.xxx_hidden_Contact != nil +} + +func (x *Info) HasLicense() bool { + if x == nil { + return false + } + return x.xxx_hidden_License != nil +} + +func (x *Info) ClearContact() { + x.xxx_hidden_Contact = nil +} + +func (x *Info) ClearLicense() { + x.xxx_hidden_License = nil +} + +type Info_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The title of the application. + Title string + // A short description of the application. GFM syntax can be used for rich + // text representation. + Description string + // The Terms of Service for the API. + TermsOfService string + // The contact information for the exposed API. + Contact *Contact + // The license information for the exposed API. + License *License + // Provides the version of the application API (not to be confused + // with the specification version). + Version string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Info_builder) Build() *Info { + m0 := &Info{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Title = b.Title + x.xxx_hidden_Description = b.Description + x.xxx_hidden_TermsOfService = b.TermsOfService + x.xxx_hidden_Contact = b.Contact + x.xxx_hidden_License = b.License + x.xxx_hidden_Version = b.Version + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `Contact` is a representation of OpenAPI v2 specification's Contact object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// ... +// }; +// ... 
+// }; +type Contact struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + xxx_hidden_Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Contact) Reset() { + *x = Contact{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Contact) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Contact) ProtoMessage() {} + +func (x *Contact) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Contact) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *Contact) GetUrl() string { + if x != nil { + return x.xxx_hidden_Url + } + return "" +} + +func (x *Contact) GetEmail() string { + if x != nil { + return x.xxx_hidden_Email + } + return "" +} + +func (x *Contact) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *Contact) SetUrl(v string) { + x.xxx_hidden_Url = v +} + +func (x *Contact) SetEmail(v string) { + x.xxx_hidden_Email = v +} + +type Contact_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The identifying name of the contact person/organization. + Name string + // The URL pointing to the contact information. MUST be in the format of a + // URL. + Url string + // The email address of the contact person/organization. MUST be in the format + // of an email address. + Email string +} + +func (b0 Contact_builder) Build() *Contact { + m0 := &Contact{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Url = b.Url + x.xxx_hidden_Email = b.Email + return m0 +} + +// `License` is a representation of OpenAPI v2 specification's License object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// ... +// }; +// ... 
+// }; +type License struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *License) Reset() { + *x = License{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *License) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*License) ProtoMessage() {} + +func (x *License) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *License) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *License) GetUrl() string { + if x != nil { + return x.xxx_hidden_Url + } + return "" +} + +func (x *License) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *License) SetUrl(v string) { + x.xxx_hidden_Url = v +} + +type License_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The license name used for the API. + Name string + // A URL to the license used for the API. MUST be in the format of a URL. + Url string +} + +func (b0 License_builder) Build() *License { + m0 := &License{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Url = b.Url + return m0 +} + +// `ExternalDocumentation` is a representation of OpenAPI v2 specification's +// ExternalDocumentation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// ... +// external_docs: { +// description: "More about gRPC-Gateway"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// } +// ... 
+// }; +type ExternalDocumentation struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExternalDocumentation) Reset() { + *x = ExternalDocumentation{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExternalDocumentation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalDocumentation) ProtoMessage() {} + +func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ExternalDocumentation) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *ExternalDocumentation) GetUrl() string { + if x != nil { + return x.xxx_hidden_Url + } + return "" +} + +func (x *ExternalDocumentation) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *ExternalDocumentation) SetUrl(v string) { + x.xxx_hidden_Url = v +} + +type ExternalDocumentation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A short description of the target documentation. GFM syntax can be used for + // rich text representation. + Description string + // The URL for the target documentation. Value MUST be in the format + // of a URL. + Url string +} + +func (b0 ExternalDocumentation_builder) Build() *ExternalDocumentation { + m0 := &ExternalDocumentation{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Url = b.Url + return m0 +} + +// `Schema` is a representation of OpenAPI v2 specification's Schema object. 
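+// +// Example (an illustrative sketch; the message and property names are hypothetical): +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = { +// json_schema: { +// title: "Pet"; +// description: "A pet in the store."; +// required: ["name"] +// }; +// example: "{\"name\": \"Rex\"}"; +// };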
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +type Schema struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_JsonSchema *JSONSchema `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"` + xxx_hidden_Discriminator string `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"` + xxx_hidden_ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_Example string `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Schema) Reset() { + *x = Schema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Schema) GetJsonSchema() *JSONSchema { + if x != nil { + return x.xxx_hidden_JsonSchema + } + return nil +} + +func (x *Schema) GetDiscriminator() string { + if x != nil { + return x.xxx_hidden_Discriminator + } + return "" +} + +func (x *Schema) GetReadOnly() bool { + if x != nil { + return x.xxx_hidden_ReadOnly + } + return false +} + +func (x *Schema) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *Schema) GetExample() string { + if x != nil { + return x.xxx_hidden_Example + } + return "" +} + +func (x *Schema) SetJsonSchema(v *JSONSchema) { + x.xxx_hidden_JsonSchema = v +} + +func (x *Schema) SetDiscriminator(v string) { + x.xxx_hidden_Discriminator = v +} + +func (x *Schema) SetReadOnly(v bool) { + x.xxx_hidden_ReadOnly = v +} + +func (x *Schema) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *Schema) SetExample(v string) { + x.xxx_hidden_Example = v +} + +func (x *Schema) HasJsonSchema() bool { + if x == nil { + return false + } + return x.xxx_hidden_JsonSchema != nil +} + +func (x *Schema) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *Schema) ClearJsonSchema() { + x.xxx_hidden_JsonSchema = nil +} + +func (x *Schema) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +type Schema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + JsonSchema *JSONSchema + // Adds support for polymorphism. The discriminator is the schema property + // name that is used to differentiate between other schema that inherit this + // schema. The property name used MUST be defined at this schema and it MUST + // be in the required property list. When used, the value MUST be the name of + // this schema or any schema that inherits it. + Discriminator string + // Relevant only for Schema "properties" definitions. Declares the property as + // "read only". 
This means that it MAY be sent as part of a response but MUST + // NOT be sent as part of the request. Properties marked as readOnly being + // true SHOULD NOT be in the required list of the defined schema. Default + // value is false. + ReadOnly bool + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation + // A free-form property to include an example of an instance for this schema in JSON. + // This is copied verbatim to the output. + Example string +} + +func (b0 Schema_builder) Build() *Schema { + m0 := &Schema{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_JsonSchema = b.JsonSchema + x.xxx_hidden_Discriminator = b.Discriminator + x.xxx_hidden_ReadOnly = b.ReadOnly + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_Example = b.Example + return m0 +} + +// `EnumSchema` is subset of fields from the OpenAPI v2 specification's Schema object. +// Only fields that are applicable to Enums are included +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = { +// ... +// title: "MyEnum"; +// description:"This is my nice enum"; +// example: "ZERO"; +// required: true; +// ... +// }; +type EnumSchema struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Default string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"` + xxx_hidden_Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` + xxx_hidden_Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + xxx_hidden_ReadOnly bool `protobuf:"varint,5,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_Example string `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"` + xxx_hidden_Ref string `protobuf:"bytes,8,opt,name=ref,proto3" json:"ref,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EnumSchema) Reset() { + *x = EnumSchema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EnumSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumSchema) ProtoMessage() {} + +func (x *EnumSchema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EnumSchema) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *EnumSchema) GetDefault() string { + if x != nil { + return x.xxx_hidden_Default + } + return "" +} + +func (x *EnumSchema) GetTitle() string { + if x != nil { + return x.xxx_hidden_Title + } + return "" +} + +func (x *EnumSchema) GetRequired() bool { + if x != nil { + return 
x.xxx_hidden_Required + } + return false +} + +func (x *EnumSchema) GetReadOnly() bool { + if x != nil { + return x.xxx_hidden_ReadOnly + } + return false +} + +func (x *EnumSchema) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *EnumSchema) GetExample() string { + if x != nil { + return x.xxx_hidden_Example + } + return "" +} + +func (x *EnumSchema) GetRef() string { + if x != nil { + return x.xxx_hidden_Ref + } + return "" +} + +func (x *EnumSchema) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *EnumSchema) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *EnumSchema) SetDefault(v string) { + x.xxx_hidden_Default = v +} + +func (x *EnumSchema) SetTitle(v string) { + x.xxx_hidden_Title = v +} + +func (x *EnumSchema) SetRequired(v bool) { + x.xxx_hidden_Required = v +} + +func (x *EnumSchema) SetReadOnly(v bool) { + x.xxx_hidden_ReadOnly = v +} + +func (x *EnumSchema) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *EnumSchema) SetExample(v string) { + x.xxx_hidden_Example = v +} + +func (x *EnumSchema) SetRef(v string) { + x.xxx_hidden_Ref = v +} + +func (x *EnumSchema) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *EnumSchema) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *EnumSchema) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +type EnumSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A short description of the schema. + Description string + Default string + // The title of the schema. + Title string + Required bool + ReadOnly bool + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation + Example string + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // + // `ref: ".google.protobuf.Timestamp"`. + Ref string + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 EnumSchema_builder) Build() *EnumSchema { + m0 := &EnumSchema{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Default = b.Default + x.xxx_hidden_Title = b.Title + x.xxx_hidden_Required = b.Required + x.xxx_hidden_ReadOnly = b.ReadOnly + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_Example = b.Example + x.xxx_hidden_Ref = b.Ref + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `JSONSchema` represents properties from JSON Schema taken, and as used, in +// the OpenAPI v2 spec. +// +// This includes changes made by OpenAPI v2. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// See also: https://cswr.github.io/JsonSchema/spec/basic_types/, +// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json +// +// Example: +// +// message SimpleMessage { +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = { +// json_schema: { +// title: "SimpleMessage" +// description: "A simple message." +// required: ["id"] +// } +// }; +// +// // Id represents the message identifier. +// string id = 1; [ +// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { +// description: "The unique identifier of the simple message." +// }]; +// } +type JSONSchema struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` + xxx_hidden_Title string `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Default string `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"` + xxx_hidden_ReadOnly bool `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + xxx_hidden_Example string `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` + xxx_hidden_MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + xxx_hidden_Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + xxx_hidden_ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + xxx_hidden_Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + xxx_hidden_ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + xxx_hidden_MaxLength uint64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + xxx_hidden_MinLength uint64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + xxx_hidden_Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + xxx_hidden_MaxItems uint64 `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + xxx_hidden_MinItems uint64 `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + xxx_hidden_UniqueItems bool `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + xxx_hidden_MaxProperties uint64 `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` + xxx_hidden_MinProperties uint64 `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` + xxx_hidden_Required []string `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"` + xxx_hidden_Array []string `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"` + xxx_hidden_Type []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"` + xxx_hidden_Format string `protobuf:"bytes,36,opt,name=format,proto3" json:"format,omitempty"` + xxx_hidden_Enum []string 
`protobuf:"bytes,46,rep,name=enum,proto3" json:"enum,omitempty"` + xxx_hidden_FieldConfiguration *JSONSchema_FieldConfiguration `protobuf:"bytes,1001,opt,name=field_configuration,json=fieldConfiguration,proto3" json:"field_configuration,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *JSONSchema) Reset() { + *x = JSONSchema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *JSONSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JSONSchema) ProtoMessage() {} + +func (x *JSONSchema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *JSONSchema) GetRef() string { + if x != nil { + return x.xxx_hidden_Ref + } + return "" +} + +func (x *JSONSchema) GetTitle() string { + if x != nil { + return x.xxx_hidden_Title + } + return "" +} + +func (x *JSONSchema) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *JSONSchema) GetDefault() string { + if x != nil { + return x.xxx_hidden_Default + } + return "" +} + +func (x *JSONSchema) GetReadOnly() bool { + if x != nil { + return x.xxx_hidden_ReadOnly + } + return false +} + +func (x *JSONSchema) GetExample() string { + if x != nil { + return x.xxx_hidden_Example + } + return "" +} + +func (x *JSONSchema) GetMultipleOf() float64 { + if x != nil { + return x.xxx_hidden_MultipleOf + } + return 0 +} + +func (x *JSONSchema) GetMaximum() float64 { + if x != nil { + return x.xxx_hidden_Maximum + } + return 0 +} + +func (x *JSONSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.xxx_hidden_ExclusiveMaximum + } + return false +} + +func (x *JSONSchema) GetMinimum() float64 { + if x != nil { + return x.xxx_hidden_Minimum + } + return 0 +} + +func (x *JSONSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.xxx_hidden_ExclusiveMinimum + } + return false +} + +func (x *JSONSchema) GetMaxLength() uint64 { + if x != nil { + return x.xxx_hidden_MaxLength + } + return 0 +} + +func (x *JSONSchema) GetMinLength() uint64 { + if x != nil { + return x.xxx_hidden_MinLength + } + return 0 +} + +func (x *JSONSchema) GetPattern() string { + if x != nil { + return x.xxx_hidden_Pattern + } + return "" +} + +func (x *JSONSchema) GetMaxItems() uint64 { + if x != nil { + return x.xxx_hidden_MaxItems + } + return 0 +} + +func (x *JSONSchema) GetMinItems() uint64 { + if x != nil { + return x.xxx_hidden_MinItems + } + return 0 +} + +func (x *JSONSchema) GetUniqueItems() bool { + if x != nil { + return x.xxx_hidden_UniqueItems + } + return false +} + +func (x *JSONSchema) GetMaxProperties() uint64 { + if x != nil { + return x.xxx_hidden_MaxProperties + } + return 0 +} + +func (x *JSONSchema) GetMinProperties() uint64 { + if x != nil { + return x.xxx_hidden_MinProperties + } + return 0 +} + +func (x *JSONSchema) GetRequired() []string { + if x != nil { + return x.xxx_hidden_Required + } + return nil +} + +func (x *JSONSchema) GetArray() []string { + if x != nil { + 
return x.xxx_hidden_Array + } + return nil +} + +func (x *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes { + if x != nil { + return x.xxx_hidden_Type + } + return nil +} + +func (x *JSONSchema) GetFormat() string { + if x != nil { + return x.xxx_hidden_Format + } + return "" +} + +func (x *JSONSchema) GetEnum() []string { + if x != nil { + return x.xxx_hidden_Enum + } + return nil +} + +func (x *JSONSchema) GetFieldConfiguration() *JSONSchema_FieldConfiguration { + if x != nil { + return x.xxx_hidden_FieldConfiguration + } + return nil +} + +func (x *JSONSchema) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *JSONSchema) SetRef(v string) { + x.xxx_hidden_Ref = v +} + +func (x *JSONSchema) SetTitle(v string) { + x.xxx_hidden_Title = v +} + +func (x *JSONSchema) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *JSONSchema) SetDefault(v string) { + x.xxx_hidden_Default = v +} + +func (x *JSONSchema) SetReadOnly(v bool) { + x.xxx_hidden_ReadOnly = v +} + +func (x *JSONSchema) SetExample(v string) { + x.xxx_hidden_Example = v +} + +func (x *JSONSchema) SetMultipleOf(v float64) { + x.xxx_hidden_MultipleOf = v +} + +func (x *JSONSchema) SetMaximum(v float64) { + x.xxx_hidden_Maximum = v +} + +func (x *JSONSchema) SetExclusiveMaximum(v bool) { + x.xxx_hidden_ExclusiveMaximum = v +} + +func (x *JSONSchema) SetMinimum(v float64) { + x.xxx_hidden_Minimum = v +} + +func (x *JSONSchema) SetExclusiveMinimum(v bool) { + x.xxx_hidden_ExclusiveMinimum = v +} + +func (x *JSONSchema) SetMaxLength(v uint64) { + x.xxx_hidden_MaxLength = v +} + +func (x *JSONSchema) SetMinLength(v uint64) { + x.xxx_hidden_MinLength = v +} + +func (x *JSONSchema) SetPattern(v string) { + x.xxx_hidden_Pattern = v +} + +func (x *JSONSchema) SetMaxItems(v uint64) { + x.xxx_hidden_MaxItems = v +} + +func (x *JSONSchema) SetMinItems(v uint64) { + x.xxx_hidden_MinItems = v +} + +func (x *JSONSchema) SetUniqueItems(v bool) { + x.xxx_hidden_UniqueItems = v +} + +func (x *JSONSchema) SetMaxProperties(v uint64) { + x.xxx_hidden_MaxProperties = v +} + +func (x *JSONSchema) SetMinProperties(v uint64) { + x.xxx_hidden_MinProperties = v +} + +func (x *JSONSchema) SetRequired(v []string) { + x.xxx_hidden_Required = v +} + +func (x *JSONSchema) SetArray(v []string) { + x.xxx_hidden_Array = v +} + +func (x *JSONSchema) SetType(v []JSONSchema_JSONSchemaSimpleTypes) { + x.xxx_hidden_Type = v +} + +func (x *JSONSchema) SetFormat(v string) { + x.xxx_hidden_Format = v +} + +func (x *JSONSchema) SetEnum(v []string) { + x.xxx_hidden_Enum = v +} + +func (x *JSONSchema) SetFieldConfiguration(v *JSONSchema_FieldConfiguration) { + x.xxx_hidden_FieldConfiguration = v +} + +func (x *JSONSchema) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *JSONSchema) HasFieldConfiguration() bool { + if x == nil { + return false + } + return x.xxx_hidden_FieldConfiguration != nil +} + +func (x *JSONSchema) ClearFieldConfiguration() { + x.xxx_hidden_FieldConfiguration = nil +} + +type JSONSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. 
+ // For example: + // + // `ref: ".google.protobuf.Timestamp"`. + Ref string + // The title of the schema. + Title string + // A short description of the schema. + Description string + Default string + ReadOnly bool + // A free-form property to include a JSON example of this field. This is copied + // verbatim to the output swagger.json. Quotes must be escaped. + // This property is the same for 2.0 and 3.0.0: + // https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject and + // https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject + Example string + MultipleOf float64 + // Maximum represents an inclusive upper limit for a numeric instance. The + // value MUST be a number. + Maximum float64 + ExclusiveMaximum bool + // Minimum represents an inclusive lower limit for a numeric instance. The + // value MUST be a number. + Minimum float64 + ExclusiveMinimum bool + MaxLength uint64 + MinLength uint64 + Pattern string + MaxItems uint64 + MinItems uint64 + UniqueItems bool + MaxProperties uint64 + MinProperties uint64 + Required []string + // Items in 'array' must be unique. + Array []string + Type []JSONSchema_JSONSchemaSimpleTypes + // `Format` is the extending format for the previously mentioned type. + Format string + // Items in `enum` must be unique. See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1 + Enum []string + // Additional field level properties used when generating the OpenAPI v2 file. + FieldConfiguration *JSONSchema_FieldConfiguration + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 JSONSchema_builder) Build() *JSONSchema { + m0 := &JSONSchema{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Ref = b.Ref + x.xxx_hidden_Title = b.Title + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Default = b.Default + x.xxx_hidden_ReadOnly = b.ReadOnly + x.xxx_hidden_Example = b.Example + x.xxx_hidden_MultipleOf = b.MultipleOf + x.xxx_hidden_Maximum = b.Maximum + x.xxx_hidden_ExclusiveMaximum = b.ExclusiveMaximum + x.xxx_hidden_Minimum = b.Minimum + x.xxx_hidden_ExclusiveMinimum = b.ExclusiveMinimum + x.xxx_hidden_MaxLength = b.MaxLength + x.xxx_hidden_MinLength = b.MinLength + x.xxx_hidden_Pattern = b.Pattern + x.xxx_hidden_MaxItems = b.MaxItems + x.xxx_hidden_MinItems = b.MinItems + x.xxx_hidden_UniqueItems = b.UniqueItems + x.xxx_hidden_MaxProperties = b.MaxProperties + x.xxx_hidden_MinProperties = b.MinProperties + x.xxx_hidden_Required = b.Required + x.xxx_hidden_Array = b.Array + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Format = b.Format + x.xxx_hidden_Enum = b.Enum + x.xxx_hidden_FieldConfiguration = b.FieldConfiguration + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `Tag` is a representation of OpenAPI v2 specification's Tag object.
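+// +// Example (an illustrative sketch; the description text is hypothetical): +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag) = { +// description: "Operations about echo messages."; +// external_docs: { +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// description: "More about gRPC-Gateway"; +// }; +// };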
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject +type Tag struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Tag) Reset() { + *x = Tag{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Tag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tag) ProtoMessage() {} + +func (x *Tag) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Tag) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *Tag) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Tag) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.xxx_hidden_ExternalDocs + } + return nil +} + +func (x *Tag) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *Tag) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *Tag) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Tag) SetExternalDocs(v *ExternalDocumentation) { + x.xxx_hidden_ExternalDocs = v +} + +func (x *Tag) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *Tag) HasExternalDocs() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalDocs != nil +} + +func (x *Tag) ClearExternalDocs() { + x.xxx_hidden_ExternalDocs = nil +} + +type Tag_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The name of the tag. Use it to allow override of the name of a + // global Tag object, then use that name to reference the tag throughout the + // OpenAPI file. + Name string + // A short description for the tag. GFM syntax can be used for rich text + // representation. + Description string + // Additional external documentation for this tag. + ExternalDocs *ExternalDocumentation + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 Tag_builder) Build() *Tag { + m0 := &Tag{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Description = b.Description + x.xxx_hidden_ExternalDocs = b.ExternalDocs + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `SecurityDefinitions` is a representation of OpenAPI v2 specification's +// Security Definitions object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject +// +// A declaration of the security schemes available to be used in the +// specification. This does not enforce the security schemes on the operations +// and only serves to provide the relevant details for each scheme. +type SecurityDefinitions struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Security map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityDefinitions) Reset() { + *x = SecurityDefinitions{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityDefinitions) ProtoMessage() {} + +func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme { + if x != nil { + return x.xxx_hidden_Security + } + return nil +} + +func (x *SecurityDefinitions) SetSecurity(v map[string]*SecurityScheme) { + x.xxx_hidden_Security = v +} + +type SecurityDefinitions_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // A single security scheme definition, mapping a "name" to the scheme it + // defines. + Security map[string]*SecurityScheme +} + +func (b0 SecurityDefinitions_builder) Build() *SecurityDefinitions { + m0 := &SecurityDefinitions{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Security = b.Security + return m0 +} + +// `SecurityScheme` is a representation of OpenAPI v2 specification's +// Security Scheme object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject +// +// Allows the definition of a security scheme that can be used by the +// operations. Supported schemes are basic authentication, an API key (either as +// a header or as a query parameter) and OAuth2's common flows (implicit, +// password, application and access code). 
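+// +// Example (an illustrative sketch; the scheme name "ApiKeyAuth" and the header +// name "X-API-Key" are hypothetical): +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// security_definitions: { +// security: { +// key: "ApiKeyAuth"; +// value: { +// type: TYPE_API_KEY; +// in: IN_HEADER; +// name: "X-API-Key"; +// }; +// }; +// }; +// };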
+type SecurityScheme struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Type SecurityScheme_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"` + xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + xxx_hidden_Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + xxx_hidden_In SecurityScheme_In `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_In" json:"in,omitempty"` + xxx_hidden_Flow SecurityScheme_Flow `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Flow" json:"flow,omitempty"` + xxx_hidden_AuthorizationUrl string `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + xxx_hidden_TokenUrl string `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + xxx_hidden_Scopes *Scopes `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"` + xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityScheme) Reset() { + *x = SecurityScheme{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityScheme) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityScheme) ProtoMessage() {} + +func (x *SecurityScheme) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityScheme) GetType() SecurityScheme_Type { + if x != nil { + return x.xxx_hidden_Type + } + return SecurityScheme_TYPE_INVALID +} + +func (x *SecurityScheme) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *SecurityScheme) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *SecurityScheme) GetIn() SecurityScheme_In { + if x != nil { + return x.xxx_hidden_In + } + return SecurityScheme_IN_INVALID +} + +func (x *SecurityScheme) GetFlow() SecurityScheme_Flow { + if x != nil { + return x.xxx_hidden_Flow + } + return SecurityScheme_FLOW_INVALID +} + +func (x *SecurityScheme) GetAuthorizationUrl() string { + if x != nil { + return x.xxx_hidden_AuthorizationUrl + } + return "" +} + +func (x *SecurityScheme) GetTokenUrl() string { + if x != nil { + return x.xxx_hidden_TokenUrl + } + return "" +} + +func (x *SecurityScheme) GetScopes() *Scopes { + if x != nil { + return x.xxx_hidden_Scopes + } + return nil +} + +func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.xxx_hidden_Extensions + } + return nil +} + +func (x *SecurityScheme) SetType(v SecurityScheme_Type) { + x.xxx_hidden_Type = v +} + +func (x *SecurityScheme) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *SecurityScheme) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *SecurityScheme) SetIn(v 
SecurityScheme_In) { + x.xxx_hidden_In = v +} + +func (x *SecurityScheme) SetFlow(v SecurityScheme_Flow) { + x.xxx_hidden_Flow = v +} + +func (x *SecurityScheme) SetAuthorizationUrl(v string) { + x.xxx_hidden_AuthorizationUrl = v +} + +func (x *SecurityScheme) SetTokenUrl(v string) { + x.xxx_hidden_TokenUrl = v +} + +func (x *SecurityScheme) SetScopes(v *Scopes) { + x.xxx_hidden_Scopes = v +} + +func (x *SecurityScheme) SetExtensions(v map[string]*structpb.Value) { + x.xxx_hidden_Extensions = v +} + +func (x *SecurityScheme) HasScopes() bool { + if x == nil { + return false + } + return x.xxx_hidden_Scopes != nil +} + +func (x *SecurityScheme) ClearScopes() { + x.xxx_hidden_Scopes = nil +} + +type SecurityScheme_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The type of the security scheme. Valid values are "basic", + // "apiKey" or "oauth2". + Type SecurityScheme_Type + // A short description for security scheme. + Description string + // The name of the header or query parameter to be used. + // Valid for apiKey. + Name string + // The location of the API key. Valid values are "query" or + // "header". + // Valid for apiKey. + In SecurityScheme_In + // The flow used by the OAuth2 security scheme. Valid values are + // "implicit", "password", "application" or "accessCode". + // Valid for oauth2. + Flow SecurityScheme_Flow + // The authorization URL to be used for this flow. This SHOULD be in + // the form of a URL. + // Valid for oauth2/implicit and oauth2/accessCode. + AuthorizationUrl string + // The token URL to be used for this flow. This SHOULD be in the + // form of a URL. + // Valid for oauth2/password, oauth2/application and oauth2/accessCode. + TokenUrl string + // The available scopes for the OAuth2 security scheme. + // Valid for oauth2. + Scopes *Scopes + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value +} + +func (b0 SecurityScheme_builder) Build() *SecurityScheme { + m0 := &SecurityScheme{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Name = b.Name + x.xxx_hidden_In = b.In + x.xxx_hidden_Flow = b.Flow + x.xxx_hidden_AuthorizationUrl = b.AuthorizationUrl + x.xxx_hidden_TokenUrl = b.TokenUrl + x.xxx_hidden_Scopes = b.Scopes + x.xxx_hidden_Extensions = b.Extensions + return m0 +} + +// `SecurityRequirement` is a representation of OpenAPI v2 specification's +// Security Requirement object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject +// +// Lists the required security schemes to execute this operation. The object can +// have multiple security schemes declared in it which are all required (that +// is, there is a logical AND between the schemes). +// +// The name used for each property MUST correspond to a security scheme +// declared in the Security Definitions. 
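+// +// Example (an illustrative sketch; the scheme names refer to hypothetical +// entries in the Security Definitions): +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { +// security: { +// security_requirement: { +// key: "ApiKeyAuth"; +// value: {}; +// }; +// security_requirement: { +// key: "OAuth2"; +// value: { +// scope: "read"; +// scope: "write"; +// }; +// }; +// }; +// };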
+type SecurityRequirement struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityRequirement) Reset() { + *x = SecurityRequirement{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityRequirement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityRequirement) ProtoMessage() {} + +func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue { + if x != nil { + return x.xxx_hidden_SecurityRequirement + } + return nil +} + +func (x *SecurityRequirement) SetSecurityRequirement(v map[string]*SecurityRequirement_SecurityRequirementValue) { + x.xxx_hidden_SecurityRequirement = v +} + +type SecurityRequirement_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Each name must correspond to a security scheme which is declared in + // the Security Definitions. If the security scheme is of type "oauth2", + // then the value is a list of scope names required for the execution. + // For other security scheme types, the array MUST be empty. + SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue +} + +func (b0 SecurityRequirement_builder) Build() *SecurityRequirement { + m0 := &SecurityRequirement{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SecurityRequirement = b.SecurityRequirement + return m0 +} + +// `Scopes` is a representation of OpenAPI v2 specification's Scopes object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject +// +// Lists the available scopes for an OAuth2 security scheme. 
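+// +// Example (an illustrative sketch of a scopes block inside an OAuth2 security +// scheme definition; the scope names are hypothetical): +// +// scopes: { +// scope: { +// key: "read"; +// value: "Grants read access"; +// }; +// scope: { +// key: "write"; +// value: "Grants write access"; +// }; +// }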
+type Scopes struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Scope map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Scopes) Reset() { + *x = Scopes{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Scopes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Scopes) ProtoMessage() {} + +func (x *Scopes) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Scopes) GetScope() map[string]string { + if x != nil { + return x.xxx_hidden_Scope + } + return nil +} + +func (x *Scopes) SetScope(v map[string]string) { + x.xxx_hidden_Scope = v +} + +type Scopes_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Maps between a name of a scope to a short description of it (as the value + // of the property). + Scope map[string]string +} + +func (b0 Scopes_builder) Build() *Scopes { + m0 := &Scopes{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Scope = b.Scope + return m0 +} + +// 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file. +// These properties are not defined by OpenAPIv2, but they are used to control the generation. +type JSONSchema_FieldConfiguration struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_PathParamName string `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *JSONSchema_FieldConfiguration) Reset() { + *x = JSONSchema_FieldConfiguration{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *JSONSchema_FieldConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JSONSchema_FieldConfiguration) ProtoMessage() {} + +func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *JSONSchema_FieldConfiguration) GetPathParamName() string { + if x != nil { + return x.xxx_hidden_PathParamName + } + return "" +} + +func (x *JSONSchema_FieldConfiguration) SetPathParamName(v string) { + x.xxx_hidden_PathParamName = v +} + +type JSONSchema_FieldConfiguration_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Alternative parameter name when used as path parameter. If set, this will + // be used as the complete parameter name when this field is used as a path + // parameter. Use this to avoid having auto generated path parameter names + // for overlapping paths. 
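+ // + // Example (an illustrative sketch; "petId" is a hypothetical parameter name + // for a proto field used in overlapping paths): + // + // (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { + // field_configuration: {path_param_name: "petId"} + // }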
+ PathParamName string +} + +func (b0 JSONSchema_FieldConfiguration_builder) Build() *JSONSchema_FieldConfiguration { + m0 := &JSONSchema_FieldConfiguration{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_PathParamName = b.PathParamName + return m0 +} + +// If the security scheme is of type "oauth2", then the value is a list of +// scope names required for the execution. For other security scheme types, +// the array MUST be empty. +type SecurityRequirement_SecurityRequirementValue struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Scope []string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityRequirement_SecurityRequirementValue) Reset() { + *x = SecurityRequirement_SecurityRequirementValue{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityRequirement_SecurityRequirementValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {} + +func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string { + if x != nil { + return x.xxx_hidden_Scope + } + return nil +} + +func (x *SecurityRequirement_SecurityRequirementValue) SetScope(v []string) { + x.xxx_hidden_Scope = v +} + +type SecurityRequirement_SecurityRequirementValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Scope []string +} + +func (b0 SecurityRequirement_SecurityRequirementValue_builder) Build() *SecurityRequirement_SecurityRequirementValue { + m0 := &SecurityRequirement_SecurityRequirementValue{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Scope = b.Scope + return m0 +} + +var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor + +var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x08, 0x0a, 0x07, 0x53, 0x77, 0x61, 0x67, + 0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x12, 0x43, 0x0a, + 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, + 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, + 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x71, 0x0a, 
0x14, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x65, 0x0a, 0x0d, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x6f, 0x63, 0x73, 0x12, 0x62, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xd6, 0x07, + 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, + 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x61, 0x0a, + 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 
0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x5a, 0x0a, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x55, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, + 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x62, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 
0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x0f, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x45, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a, + 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, + 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, + 0x41, 0x4e, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, + 0x22, 0xd8, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, + 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, + 0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, + 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, + 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0x9a, 0x05, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x12, 0x5d, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, + 0x12, 0x63, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 
0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x03, 0x0a, 0x04, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72, + 0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, + 0x74, 0x12, 0x4c, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c, + 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, + 0x72, 0x6c, 0x12, 0x14, 
0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x2f, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65, + 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x4b, 0x0a, 0x15, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xaa, 0x02, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x56, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x6a, + 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73, + 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, 0x0d, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x22, 0xe8, 0x03, 0x0a, 0x0a, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 
0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, + 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, + 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7, + 0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a, + 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 
0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, + 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, + 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x18, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x69, 0x6e, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18, + 0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x12, 0x5f, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70, + 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x2e, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, 0x0a, 0x13, 0x66, 0x69, 0x65, + 
0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, + 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x12, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74, + 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, + 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x52, 0x41, 0x59, + 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, 0x41, 0x4e, 0x10, 0x02, 0x12, + 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, + 0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, + 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, + 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x4a, 0x04, + 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e, + 0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a, + 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, 0x0a, 0x03, 0x54, 0x61, 0x67, + 0x12, 0x12, 0x0a, 0x04, 
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x5e, 0x0a, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, + 0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, + 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, + 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 
0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff, + 0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x02, 0x69, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x6c, 0x6f, + 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a, + 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 
0x73, 0x1a, 0x55, 0x0a, + 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0e, + 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x10, + 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, 0x54, 0x48, 0x32, 0x10, + 0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x5f, 0x49, 0x4e, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x5f, 0x51, 0x55, + 0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44, + 0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x0c, + 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x11, + 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f, + 0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x50, 0x50, + 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, + 0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x04, + 0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, 0x0a, 0x14, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 
0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x06, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, + 0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x02, 0x12, 0x06, + 0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x53, 0x53, 0x10, 0x04, 0x42, + 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 35) +var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{ + (Scheme)(0), // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme + (HeaderParameter_Type)(0), // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type + (JSONSchema_JSONSchemaSimpleTypes)(0), // 2: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes + (SecurityScheme_Type)(0), // 3: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type + (SecurityScheme_In)(0), // 4: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In + (SecurityScheme_Flow)(0), // 5: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow + (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger + (*Operation)(nil), // 7: 
grpc.gateway.protoc_gen_openapiv2.options.Operation + (*Parameters)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Parameters + (*HeaderParameter)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter + (*Header)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Header + (*Response)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.Response + (*Info)(nil), // 12: grpc.gateway.protoc_gen_openapiv2.options.Info + (*Contact)(nil), // 13: grpc.gateway.protoc_gen_openapiv2.options.Contact + (*License)(nil), // 14: grpc.gateway.protoc_gen_openapiv2.options.License + (*ExternalDocumentation)(nil), // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + (*Schema)(nil), // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema + (*EnumSchema)(nil), // 17: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema + (*JSONSchema)(nil), // 18: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + (*Tag)(nil), // 19: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*SecurityDefinitions)(nil), // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions + (*SecurityScheme)(nil), // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme + (*SecurityRequirement)(nil), // 22: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + (*Scopes)(nil), // 23: grpc.gateway.protoc_gen_openapiv2.options.Scopes + nil, // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry + nil, // 25: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry + nil, // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry + nil, // 27: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry + nil, // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry + nil, // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry + nil, // 30: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry + nil, // 31: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry + nil, // 32: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry + (*JSONSchema_FieldConfiguration)(nil), // 33: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration + nil, // 34: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry + nil, // 35: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry + nil, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry + nil, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry + (*SecurityRequirement_SecurityRequirementValue)(nil), // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue + nil, // 39: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + nil, // 40: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + (*structpb.Value)(nil), // 41: google.protobuf.Value +} +var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{ + 12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info + 0, // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme + 24, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry + 20, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> 
grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions + 22, // 4: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + 19, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag + 15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 25, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry + 15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 26, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry + 0, // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme + 22, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + 27, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry + 8, // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters + 9, // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter + 1, // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type + 16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema + 28, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry + 29, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry + 30, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry + 13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Contact + 14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License + 31, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry + 18, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + 15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 15, // 25: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 32, // 26: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry + 2, // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> 
grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes + 33, // 28: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration + 34, // 29: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry + 15, // 30: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 35, // 31: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry + 36, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry + 3, // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type + 4, // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In + 5, // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow + 23, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes + 37, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry + 39, // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + 40, // 39: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + 11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response + 41, // 41: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value + 11, // 42: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response + 41, // 43: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value + 10, // 44: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header + 41, // 45: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 46: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 47: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 48: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value + 41, // 49: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value + 21, // 50: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme + 41, // 51: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value + 38, // 52: 
grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue + 53, // [53:53] is the sub-list for method output_type + 53, // [53:53] is the sub-list for method input_type + 53, // [53:53] is the sub-list for extension type_name + 53, // [53:53] is the sub-list for extension extendee + 0, // [0:53] is the sub-list for field type_name +} + +func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() } +func file_protoc_gen_openapiv2_options_openapiv2_proto_init() { + if File_protoc_gen_openapiv2_options_openapiv2_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc, + NumEnums: 6, + NumMessages: 35, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes, + DependencyIndexes: file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs, + EnumInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes, + MessageInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes, + }.Build() + File_protoc_gen_openapiv2_options_openapiv2_proto = out.File + file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = nil + file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = nil + file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel new file mode 100644 index 00000000000..04b4bebf3d5 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -0,0 +1,98 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "runtime", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_httpbodyproto.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime", + deps = [ + "//internal/httprule", + "//utilities", + "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//grpclog", + "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//reflect/protoregistry", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/fieldmaskpb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +go_test( + name = "runtime_test", + size = "small", + srcs = [ + "context_test.go", + "convert_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_httpbodyproto_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + 
"marshaler_registry_test.go", + "mux_internal_test.go", + "mux_test.go", + "pattern_test.go", + "query_fuzz_test.go", + "query_test.go", + ], + embed = [":runtime"], + deps = [ + "//runtime/internal/examplepb", + "//utilities", + "@com_github_google_go_cmp//cmp", + "@com_github_google_go_cmp//cmp/cmpopts", + "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_genproto_googleapis_rpc//errdetails", + "@org_golang_google_genproto_googleapis_rpc//status", + "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//testing/protocmp", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/fieldmaskpb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +alias( + name = "go_default_library", + actual = ":runtime", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go new file mode 100644 index 00000000000..00b2228a1de --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -0,0 +1,417 @@ +package runtime + +import ( + "context" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/textproto" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// MetadataHeaderPrefix is the http prefix that represents custom metadata +// parameters to or from a gRPC call. +const MetadataHeaderPrefix = "Grpc-Metadata-" + +// MetadataPrefix is prepended to permanent HTTP header keys (as specified +// by the IANA) when added to the gRPC context. +const MetadataPrefix = "grpcgateway-" + +// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to +// HTTP headers in a response handled by grpc-gateway +const MetadataTrailerPrefix = "Grpc-Trailer-" + +const metadataGrpcTimeout = "Grpc-Timeout" +const metadataHeaderBinarySuffix = "-Bin" + +const xForwardedFor = "X-Forwarded-For" +const xForwardedHost = "X-Forwarded-Host" + +// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound +// header isn't present. If the value is 0 the sent `context` will not have a timeout. +var DefaultContextTimeout = 0 * time.Second + +// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed. +// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context. +var malformedHTTPHeaders = map[string]struct{}{ + "connection": {}, +} + +type ( + rpcMethodKey struct{} + httpPathPatternKey struct{} + httpPatternKey struct{} + + AnnotateContextOption func(ctx context.Context) context.Context +) + +func WithHTTPPathPattern(pattern string) AnnotateContextOption { + return func(ctx context.Context) context.Context { + return withHTTPPathPattern(ctx, pattern) + } +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. 
+ return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +/* +AnnotateContext adds context information such as metadata from the request. + +At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", +except that the forwarded destination is not another HTTP service but rather +a gRPC service. +*/ +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewOutgoingContext(ctx, md), nil +} + +// AnnotateIncomingContext adds context information such as metadata from the request. +// Attach metadata as incoming context. +func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewIncomingContext(ctx, md), nil +} + +func isValidGRPCMetadataKey(key string) bool { + // Must be a valid gRPC "Header-Name" as defined here: + // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md + // This means 0-9 a-z _ - . + // Only lowercase letters are valid in the wire protocol, but the client library will normalize + // uppercase ASCII to lowercase, so uppercase ASCII is also acceptable. + bytes := []byte(key) // gRPC validates strings on the byte level, not Unicode. + for _, ch := range bytes { + validLowercaseLetter := ch >= 'a' && ch <= 'z' + validUppercaseLetter := ch >= 'A' && ch <= 'Z' + validDigit := ch >= '0' && ch <= '9' + validOther := ch == '.' || ch == '-' || ch == '_' + if !validLowercaseLetter && !validUppercaseLetter && !validDigit && !validOther { + return false + } + } + return true +} + +func isValidGRPCMetadataTextValue(textValue string) bool { + // Must be a valid gRPC "ASCII-Value" as defined here: + // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md + // This means printable ASCII (including/plus spaces); 0x20 to 0x7E inclusive. + bytes := []byte(textValue) // gRPC validates strings on the byte level, not Unicode. + for _, ch := range bytes { + if ch < 0x20 || ch > 0x7E { + return false + } + } + return true +} + +func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) { + ctx = withRPCMethod(ctx, rpcMethodName) + for _, o := range options { + ctx = o(ctx) + } + timeout := DefaultContextTimeout + if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { + var err error + timeout, err = timeoutDecode(tm) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + } + } + var pairs []string + for key, vals := range req.Header { + key = textproto.CanonicalMIMEHeaderKey(key) + switch key { + case xForwardedFor, xForwardedHost: + // Handled separately below + continue + } + + for _, val := range vals { + // For backwards-compatibility, pass through 'authorization' header with no prefix. 
+ if key == "Authorization" { + pairs = append(pairs, "authorization", val) + } + if h, ok := mux.incomingHeaderMatcher(key); ok { + if !isValidGRPCMetadataKey(h) { + grpclog.Errorf("HTTP header name %q is not valid as gRPC metadata key; skipping", h) + continue + } + // Handles "-bin" metadata in grpc, since grpc will do another base64 + // encode before sending to server, we need to decode it first. + if strings.HasSuffix(key, metadataHeaderBinarySuffix) { + b, err := decodeBinHeader(val) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err) + } + + val = string(b) + } else if !isValidGRPCMetadataTextValue(val) { + grpclog.Errorf("Value of HTTP header %q contains non-ASCII value (not valid as gRPC metadata): skipping", h) + continue + } + pairs = append(pairs, h, val) + } + } + } + if host := req.Header.Get(xForwardedHost); host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), host) + } else if req.Host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) + } + + xff := req.Header.Values(xForwardedFor) + if addr := req.RemoteAddr; addr != "" { + if remoteIP, _, err := net.SplitHostPort(addr); err == nil { + xff = append(xff, remoteIP) + } + } + if len(xff) > 0 { + pairs = append(pairs, strings.ToLower(xForwardedFor), strings.Join(xff, ", ")) + } + + if timeout != 0 { + ctx, _ = context.WithTimeout(ctx, timeout) + } + md := metadata.Pairs(pairs...) + for _, mda := range mux.metadataAnnotators { + md = metadata.Join(md, mda(ctx, req)) + } + if len(md) == 0 { + return ctx, nil, nil + } + return ctx, md, nil +} + +// ServerMetadata consists of metadata sent from gRPC server. +type ServerMetadata struct { + HeaderMD metadata.MD + TrailerMD metadata.MD +} + +type serverMetadataKey struct{} + +// NewServerMetadataContext creates a new context with ServerMetadata +func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { + if ctx == nil { + ctx = context.Background() + } + return context.WithValue(ctx, serverMetadataKey{}, md) +} + +// ServerMetadataFromContext returns the ServerMetadata in ctx +func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { + if ctx == nil { + return md, false + } + md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) + return +} + +// ServerTransportStream implements grpc.ServerTransportStream. +// It should only be used by the generated files to support grpc.SendHeader +// outside of gRPC server use. +type ServerTransportStream struct { + mu sync.Mutex + header metadata.MD + trailer metadata.MD +} + +// Method returns the method for the stream. +func (s *ServerTransportStream) Method() string { + return "" +} + +// Header returns the header metadata of the stream. +func (s *ServerTransportStream) Header() metadata.MD { + s.mu.Lock() + defer s.mu.Unlock() + return s.header.Copy() +} + +// SetHeader sets the header metadata. +func (s *ServerTransportStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + + s.mu.Lock() + s.header = metadata.Join(s.header, md) + s.mu.Unlock() + return nil +} + +// SendHeader sets the header metadata. +func (s *ServerTransportStream) SendHeader(md metadata.MD) error { + return s.SetHeader(md) +} + +// Trailer returns the cached trailer metadata. +func (s *ServerTransportStream) Trailer() metadata.MD { + s.mu.Lock() + defer s.mu.Unlock() + return s.trailer.Copy() +} + +// SetTrailer sets the trailer metadata. 
+func (s *ServerTransportStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + + s.mu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.mu.Unlock() + return nil +} + +func timeoutDecode(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("timeout string is too short: %q", s) + } + d, ok := timeoutUnitToDuration(s[size-1]) + if !ok { + return 0, fmt.Errorf("timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { + switch u { + case 'H': + return time.Hour, true + case 'M': + return time.Minute, true + case 'S': + return time.Second, true + case 'm': + return time.Millisecond, true + case 'u': + return time.Microsecond, true + case 'n': + return time.Nanosecond, true + default: + return + } +} + +// isPermanentHTTPHeader checks whether hdr belongs to the list of +// permanent request headers maintained by IANA. +// http://www.iana.org/assignments/message-headers/message-headers.xml +func isPermanentHTTPHeader(hdr string) bool { + switch hdr { + case + "Accept", + "Accept-Charset", + "Accept-Language", + "Accept-Ranges", + "Authorization", + "Cache-Control", + "Content-Type", + "Cookie", + "Date", + "Expect", + "From", + "Host", + "If-Match", + "If-Modified-Since", + "If-None-Match", + "If-Schedule-Tag-Match", + "If-Unmodified-Since", + "Max-Forwards", + "Origin", + "Pragma", + "Referer", + "User-Agent", + "Via", + "Warning": + return true + } + return false +} + +// isMalformedHTTPHeader checks whether header belongs to the list of +// "malformed headers" and would be rejected by the gRPC server. +func isMalformedHTTPHeader(header string) bool { + _, isMalformed := malformedHTTPHeaders[strings.ToLower(header)] + return isMalformed +} + +// RPCMethod returns the method string for the server context. The returned +// string is in the format of "/package.service/method". +func RPCMethod(ctx context.Context) (string, bool) { + m := ctx.Value(rpcMethodKey{}) + if m == nil { + return "", false + } + ms, ok := m.(string) + if !ok { + return "", false + } + return ms, true +} + +func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context { + return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName) +} + +// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists. +// The format of the returned string is defined by the google.api.http path template type. +func HTTPPathPattern(ctx context.Context) (string, bool) { + m := ctx.Value(httpPathPatternKey{}) + if m == nil { + return "", false + } + ms, ok := m.(string) + if !ok { + return "", false + } + return ms, true +} + +func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context { + return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern) +} + +// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists. 
+func HTTPPattern(ctx context.Context) (Pattern, bool) { + v, ok := ctx.Value(httpPatternKey{}).(Pattern) + return v, ok +} + +func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context { + return context.WithValue(ctx, httpPatternKey{}, httpPattern) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go new file mode 100644 index 00000000000..2e50082ad11 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -0,0 +1,318 @@ +package runtime + +import ( + "encoding/base64" + "fmt" + "strconv" + "strings" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +// String just returns the given string. +// It is just for compatibility to other types. +func String(val string) (string, error) { + return val, nil +} + +// StringSlice converts 'val' where individual strings are separated by +// 'sep' into a string slice. +func StringSlice(val, sep string) ([]string, error) { + return strings.Split(val, sep), nil +} + +// Bool converts the given string representation of a boolean value into bool. +func Bool(val string) (bool, error) { + return strconv.ParseBool(val) +} + +// BoolSlice converts 'val' where individual booleans are separated by +// 'sep' into a bool slice. +func BoolSlice(val, sep string) ([]bool, error) { + s := strings.Split(val, sep) + values := make([]bool, len(s)) + for i, v := range s { + value, err := Bool(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Float64 converts the given string representation into representation of a floating point number into float64. +func Float64(val string) (float64, error) { + return strconv.ParseFloat(val, 64) +} + +// Float64Slice converts 'val' where individual floating point numbers are separated by +// 'sep' into a float64 slice. +func Float64Slice(val, sep string) ([]float64, error) { + s := strings.Split(val, sep) + values := make([]float64, len(s)) + for i, v := range s { + value, err := Float64(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Float32 converts the given string representation of a floating point number into float32. +func Float32(val string) (float32, error) { + f, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +// Float32Slice converts 'val' where individual floating point numbers are separated by +// 'sep' into a float32 slice. +func Float32Slice(val, sep string) ([]float32, error) { + s := strings.Split(val, sep) + values := make([]float32, len(s)) + for i, v := range s { + value, err := Float32(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Int64 converts the given string representation of an integer into int64. +func Int64(val string) (int64, error) { + return strconv.ParseInt(val, 0, 64) +} + +// Int64Slice converts 'val' where individual integers are separated by +// 'sep' into an int64 slice. 
+func Int64Slice(val, sep string) ([]int64, error) { + s := strings.Split(val, sep) + values := make([]int64, len(s)) + for i, v := range s { + value, err := Int64(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Int32 converts the given string representation of an integer into int32. +func Int32(val string) (int32, error) { + i, err := strconv.ParseInt(val, 0, 32) + if err != nil { + return 0, err + } + return int32(i), nil +} + +// Int32Slice converts 'val' where individual integers are separated by +// 'sep' into an int32 slice. +func Int32Slice(val, sep string) ([]int32, error) { + s := strings.Split(val, sep) + values := make([]int32, len(s)) + for i, v := range s { + value, err := Int32(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Uint64 converts the given string representation of an integer into uint64. +func Uint64(val string) (uint64, error) { + return strconv.ParseUint(val, 0, 64) +} + +// Uint64Slice converts 'val' where individual integers are separated by +// 'sep' into a uint64 slice. +func Uint64Slice(val, sep string) ([]uint64, error) { + s := strings.Split(val, sep) + values := make([]uint64, len(s)) + for i, v := range s { + value, err := Uint64(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Uint32 converts the given string representation of an integer into uint32. +func Uint32(val string) (uint32, error) { + i, err := strconv.ParseUint(val, 0, 32) + if err != nil { + return 0, err + } + return uint32(i), nil +} + +// Uint32Slice converts 'val' where individual integers are separated by +// 'sep' into a uint32 slice. +func Uint32Slice(val, sep string) ([]uint32, error) { + s := strings.Split(val, sep) + values := make([]uint32, len(s)) + for i, v := range s { + value, err := Uint32(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Bytes converts the given string representation of a byte sequence into a slice of bytes +// A bytes sequence is encoded in URL-safe base64 without padding +func Bytes(val string) ([]byte, error) { + b, err := base64.StdEncoding.DecodeString(val) + if err != nil { + b, err = base64.URLEncoding.DecodeString(val) + if err != nil { + return nil, err + } + } + return b, nil +} + +// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe +// base64 without padding, are separated by 'sep' into a slice of byte slices. +func BytesSlice(val, sep string) ([][]byte, error) { + s := strings.Split(val, sep) + values := make([][]byte, len(s)) + for i, v := range s { + value, err := Bytes(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp. +func Timestamp(val string) (*timestamppb.Timestamp, error) { + var r timestamppb.Timestamp + val = strconv.Quote(strings.Trim(val, `"`)) + unmarshaler := &protojson.UnmarshalOptions{} + if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil { + return nil, err + } + return &r, nil +} + +// Duration converts the given string into a timestamp.Duration. 
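The conversion helpers in convert.go are plain exported functions, so they are easy to exercise directly; they back query-parameter and path-parameter decoding in the generated handlers. A quick sketch with illustrative values (note that `Bytes` first tries standard base64 and then falls back to the URL-safe alphabet):

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	ints, _ := runtime.Int64Slice("1,2,3", ",")
	fmt.Println(ints) // [1 2 3]

	b, _ := runtime.Bytes("aGVsbG8=") // padded standard base64 of "hello"
	fmt.Printf("%s\n", b)             // hello

	// Timestamp goes through protojson, so it expects RFC 3339 input.
	if ts, err := runtime.Timestamp("2024-01-02T15:04:05Z"); err == nil {
		fmt.Println(ts.AsTime())
	}
}
```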
+func Duration(val string) (*durationpb.Duration, error) { + var r durationpb.Duration + val = strconv.Quote(strings.Trim(val, `"`)) + unmarshaler := &protojson.UnmarshalOptions{} + if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil { + return nil, err + } + return &r, nil +} + +// Enum converts the given string into an int32 that should be type casted into the +// correct enum proto type. +func Enum(val string, enumValMap map[string]int32) (int32, error) { + e, ok := enumValMap[val] + if ok { + return e, nil + } + + i, err := Int32(val) + if err != nil { + return 0, fmt.Errorf("%s is not valid", val) + } + for _, v := range enumValMap { + if v == i { + return i, nil + } + } + return 0, fmt.Errorf("%s is not valid", val) +} + +// EnumSlice converts 'val' where individual enums are separated by 'sep' +// into a int32 slice. Each individual int32 should be type casted into the +// correct enum proto type. +func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) { + s := strings.Split(val, sep) + values := make([]int32, len(s)) + for i, v := range s { + value, err := Enum(v, enumValMap) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Support for google.protobuf.wrappers on top of primitive types + +// StringValue well-known type support as wrapper around string type +func StringValue(val string) (*wrapperspb.StringValue, error) { + return wrapperspb.String(val), nil +} + +// FloatValue well-known type support as wrapper around float32 type +func FloatValue(val string) (*wrapperspb.FloatValue, error) { + parsedVal, err := Float32(val) + return wrapperspb.Float(parsedVal), err +} + +// DoubleValue well-known type support as wrapper around float64 type +func DoubleValue(val string) (*wrapperspb.DoubleValue, error) { + parsedVal, err := Float64(val) + return wrapperspb.Double(parsedVal), err +} + +// BoolValue well-known type support as wrapper around bool type +func BoolValue(val string) (*wrapperspb.BoolValue, error) { + parsedVal, err := Bool(val) + return wrapperspb.Bool(parsedVal), err +} + +// Int32Value well-known type support as wrapper around int32 type +func Int32Value(val string) (*wrapperspb.Int32Value, error) { + parsedVal, err := Int32(val) + return wrapperspb.Int32(parsedVal), err +} + +// UInt32Value well-known type support as wrapper around uint32 type +func UInt32Value(val string) (*wrapperspb.UInt32Value, error) { + parsedVal, err := Uint32(val) + return wrapperspb.UInt32(parsedVal), err +} + +// Int64Value well-known type support as wrapper around int64 type +func Int64Value(val string) (*wrapperspb.Int64Value, error) { + parsedVal, err := Int64(val) + return wrapperspb.Int64(parsedVal), err +} + +// UInt64Value well-known type support as wrapper around uint64 type +func UInt64Value(val string) (*wrapperspb.UInt64Value, error) { + parsedVal, err := Uint64(val) + return wrapperspb.UInt64(parsedVal), err +} + +// BytesValue well-known type support as wrapper around bytes[] type +func BytesValue(val string) (*wrapperspb.BytesValue, error) { + parsedVal, err := Bytes(val) + return wrapperspb.Bytes(parsedVal), err +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go new file mode 100644 index 00000000000..b6e5ddf7a9f --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go @@ -0,0 +1,5 @@ +/* +Package runtime contains runtime helper functions used by +servers which 
protoc-gen-grpc-gateway generates. +*/ +package runtime diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go new file mode 100644 index 00000000000..bbe7decf09b --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -0,0 +1,204 @@ +package runtime + +import ( + "context" + "errors" + "io" + "net/http" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// ErrorHandlerFunc is the signature used to configure error handling. +type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) + +// StreamErrorHandlerFunc is the signature used to configure stream error handling. +type StreamErrorHandlerFunc func(context.Context, error) *status.Status + +// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors. +type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int) + +// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error +// passed to the DefaultRoutingErrorHandler. +type HTTPStatusError struct { + HTTPStatus int + Err error +} + +func (e *HTTPStatusError) Error() string { + return e.Err.Error() +} + +// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. +// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +func HTTPStatusFromCode(code codes.Code) int { + switch code { + case codes.OK: + return http.StatusOK + case codes.Canceled: + return 499 + case codes.Unknown: + return http.StatusInternalServerError + case codes.InvalidArgument: + return http.StatusBadRequest + case codes.DeadlineExceeded: + return http.StatusGatewayTimeout + case codes.NotFound: + return http.StatusNotFound + case codes.AlreadyExists: + return http.StatusConflict + case codes.PermissionDenied: + return http.StatusForbidden + case codes.Unauthenticated: + return http.StatusUnauthorized + case codes.ResourceExhausted: + return http.StatusTooManyRequests + case codes.FailedPrecondition: + // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status. + return http.StatusBadRequest + case codes.Aborted: + return http.StatusConflict + case codes.OutOfRange: + return http.StatusBadRequest + case codes.Unimplemented: + return http.StatusNotImplemented + case codes.Internal: + return http.StatusInternalServerError + case codes.Unavailable: + return http.StatusServiceUnavailable + case codes.DataLoss: + return http.StatusInternalServerError + default: + grpclog.Warningf("Unknown gRPC error code: %v", code) + return http.StatusInternalServerError + } +} + +// HTTPError uses the mux-configured error handler. +func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + mux.errorHandler(ctx, mux, marshaler, w, r, err) +} + +// HTTPStreamError uses the mux-configured stream error handler to notify error to the client without closing the connection. 
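`HTTPStatusFromCode` above is the canonical gRPC-code-to-HTTP mapping used by every handler below, including the non-standard 499 for client-canceled requests and the deliberate 400 (not 412) for `FailedPrecondition`. A tiny demonstration:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc/codes"
)

func main() {
	for _, c := range []codes.Code{codes.NotFound, codes.ResourceExhausted, codes.Canceled} {
		fmt.Printf("%v -> %d\n", c, runtime.HTTPStatusFromCode(c)) // 404, 429, 499
	}
}
```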
+func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + buf, err := marshaler.Marshal(msg) + if err != nil { + grpclog.Errorf("Failed to marshal an error: %v", err) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to notify error to client: %v", err) + return + } +} + +// DefaultHTTPErrorHandler is the default error handler. +// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. +// If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is +// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler +// for the ServeMux constructor to handle edge cases which the standard mappings in HTTPStatusFromCode +// are insufficient for. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body written by this function is a Status message marshaled by the Marshaler. +func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + // return Internal when Marshal failed + const fallback = `{"code": 13, "message": "failed to marshal error message"}` + const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}` + + var customStatus *HTTPStatusError + if errors.As(err, &customStatus) { + err = customStatus.Err + } + + s := status.Convert(err) + + w.Header().Del("Trailer") + w.Header().Del("Transfer-Encoding") + + respRw, err := mux.forwardResponseRewriter(ctx, s.Proto()) + if err != nil { + grpclog.Errorf("Failed to rewrite error message %q: %v", s, err) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallbackRewriter); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + return + } + + contentType := marshaler.ContentType(respRw) + w.Header().Set("Content-Type", contentType) + + if s.Code() == codes.Unauthenticated { + w.Header().Set("WWW-Authenticate", s.Message()) + } + + buf, merr := marshaler.Marshal(respRw) + if merr != nil { + grpclog.Errorf("Failed to marshal error message %q: %v", s, merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if ok { + handleForwardResponseServerMetadata(w, mux, md) + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. 
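`DefaultHTTPErrorHandler` honors `HTTPStatusError` by unwrapping it and using its `HTTPStatus` for the response. Combined with `WithErrorHandler`, a `ServeMux` option defined in the vendored mux.go, this allows per-code overrides; a sketch, with the 404-to-410 rewrite purely illustrative:

```go
package main

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	mux := runtime.NewServeMux(
		runtime.WithErrorHandler(func(ctx context.Context, mux *runtime.ServeMux,
			m runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
			// Surface NotFound as 410 Gone; delegate everything else to the
			// default status mapping.
			if status.Code(err) == codes.NotFound {
				err = &runtime.HTTPStatusError{HTTPStatus: http.StatusGone, Err: err}
			}
			runtime.DefaultHTTPErrorHandler(ctx, mux, m, w, r, err)
		}),
	)
	_ = mux // register generated handlers on mux as usual
}
```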
+ doForwardTrailers := requestAcceptsTrailers(r) + + if doForwardTrailers { + handleForwardResponseTrailerHeader(w, mux, md) + w.Header().Set("Transfer-Encoding", "chunked") + } + } + + st := HTTPStatusFromCode(s.Code()) + if customStatus != nil { + st = customStatus.HTTPStatus + } + + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + + if ok && requestAcceptsTrailers(r) { + handleForwardResponseTrailer(w, mux, md) + } +} + +func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status { + return status.Convert(err) +} + +// DefaultRoutingErrorHandler is our default handler for routing errors. +// By default http error codes mapped on the following error codes: +// +// NotFound -> grpc.NotFound +// StatusBadRequest -> grpc.InvalidArgument +// MethodNotAllowed -> grpc.Unimplemented +// Other -> grpc.Internal, method is not expecting to be called for anything else +func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) { + sterr := status.Error(codes.Internal, "Unexpected routing error") + switch httpStatus { + case http.StatusBadRequest: + sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus)) + case http.StatusMethodNotAllowed: + sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus)) + case http.StatusNotFound: + sterr = status.Error(codes.NotFound, http.StatusText(httpStatus)) + } + mux.errorHandler(ctx, mux, marshaler, w, r, sterr) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go new file mode 100644 index 00000000000..2fcd7af3c40 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -0,0 +1,168 @@ +package runtime + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "sort" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + field_mask "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor { + fd := fields.ByName(protoreflect.Name(name)) + if fd != nil { + return fd + } + + return fields.ByJSONName(name) +} + +// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. +func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) { + fm := &field_mask.FieldMask{} + var root interface{} + + if err := json.NewDecoder(r).Decode(&root); err != nil { + if errors.Is(err, io.EOF) { + return fm, nil + } + return nil, err + } + + queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}} + for len(queue) > 0 { + // dequeue an item + item := queue[0] + queue = queue[1:] + + m, ok := item.node.(map[string]interface{}) + switch { + case ok && len(m) > 0: + // if the item is an object, then enqueue all of its children + for k, v := range m { + if item.msg == nil { + return nil, errors.New("JSON structure did not match request type") + } + + fd := getFieldByName(item.msg.Descriptor().Fields(), k) + if fd == nil { + return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName()) + } + + if isDynamicProtoMessage(fd.Message()) { + for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) { + newPath := p + if item.path != "" { + newPath = item.path + "." 
+ newPath + } + queue = append(queue, fieldMaskPathItem{path: newPath}) + } + continue + } + + if isProtobufAnyMessage(fd.Message()) && !fd.IsList() { + _, hasTypeField := v.(map[string]interface{})["@type"] + if hasTypeField { + queue = append(queue, fieldMaskPathItem{path: k}) + continue + } else { + return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName()) + } + + } + + child := fieldMaskPathItem{ + node: v, + } + if item.path == "" { + child.path = string(fd.FullName().Name()) + } else { + child.path = item.path + "." + string(fd.FullName().Name()) + } + + switch { + case fd.IsList(), fd.IsMap(): + // As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86 + // Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop. + fm.Paths = append(fm.Paths, child.path) + case fd.Message() != nil: + child.msg = item.msg.Get(fd).Message() + fallthrough + default: + queue = append(queue, child) + } + } + case ok && len(m) == 0: + fallthrough + case len(item.path) > 0: + // otherwise, it's a leaf node so print its path + fm.Paths = append(fm.Paths, item.path) + } + } + + // Sort for deterministic output in the presence + // of repeated fields. + sort.Strings(fm.Paths) + + return fm, nil +} + +func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool { + return md != nil && (md.FullName() == "google.protobuf.Any") +} + +func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool { + return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value") +} + +// buildPathsBlindly does not attempt to match proto field names to the +// json value keys. Instead it relies completely on the structure of +// the unmarshalled json contained within in. +// Returns a slice containing all subpaths with the root at the +// passed in name and json value. +func buildPathsBlindly(name string, in interface{}) []string { + m, ok := in.(map[string]interface{}) + if !ok { + return []string{name} + } + + var paths []string + queue := []fieldMaskPathItem{{path: name, node: m}} + for len(queue) > 0 { + cur := queue[0] + queue = queue[1:] + + m, ok := cur.node.(map[string]interface{}) + if !ok { + // This should never happen since we should always check that we only add + // nodes of type map[string]interface{} to the queue. + continue + } + for k, v := range m { + if mi, ok := v.(map[string]interface{}); ok { + queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi}) + } else { + // This is not a struct, so there are no more levels to descend. + curPath := cur.path + "." 
+ k + paths = append(paths, curPath) + } + } + } + return paths +} + +// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask +type fieldMaskPathItem struct { + // the list of prior fields leading up to node connected by dots + path string + + // a generic decoded json object the current item to inspect for further path extraction + node interface{} + + // parent message + msg protoreflect.Message +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go new file mode 100644 index 00000000000..2f0b9e9e0f8 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -0,0 +1,251 @@ +package runtime + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/textproto" + "strconv" + "strings" + + "google.golang.org/genproto/googleapis/api/httpbody" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// ForwardResponseStream forwards the stream from gRPC server to REST client. +func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + rc := http.NewResponseController(w) + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Error("Failed to extract ServerMetadata from context") + http.Error(w, "unexpected error", http.StatusInternalServerError) + return + } + handleForwardResponseServerMetadata(w, mux, md) + + w.Header().Set("Transfer-Encoding", "chunked") + if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + var delimiter []byte + if d, ok := marshaler.(Delimited); ok { + delimiter = d.Delimiter() + } else { + delimiter = []byte("\n") + } + + var wroteHeader bool + for { + resp, err := recv() + if errors.Is(err, io.EOF) { + return + } + if err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + + if !wroteHeader { + var contentType string + if sct, ok := marshaler.(StreamContentType); ok { + contentType = sct.StreamContentType(respRw) + } else { + contentType = marshaler.ContentType(respRw) + } + w.Header().Set("Content-Type", contentType) + } + + var buf []byte + httpBody, isHTTPBody := respRw.(*httpbody.HttpBody) + switch { + case respRw == nil: + buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response"))) + case isHTTPBody: + buf = httpBody.GetData() + default: + result := map[string]interface{}{"result": respRw} + if rb, ok := respRw.(responseBody); ok { + result["result"] = rb.XXX_ResponseBody() + } + + buf, err = marshaler.Marshal(result) + } + + if err != nil { + grpclog.Errorf("Failed to marshal response chunk: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if _, err := w.Write(buf); err != nil { + 
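Returning to fieldmask.go for a moment: `FieldMaskFromRequestBody` derives update-mask paths from whatever JSON object the client actually sent, which is how PATCH handlers build a `FieldMask` without the client supplying one. A self-contained sketch, using `fieldmaskpb.FieldMask` itself as the target message so no generated types are needed (its only field is `paths`):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	// The repeated "paths" field stops recursion, so the mask contains the
	// single path "paths".
	body := strings.NewReader(`{"paths": ["a", "b"]}`)
	fm, err := runtime.FieldMaskFromRequestBody(body, &fieldmaskpb.FieldMask{})
	fmt.Println(fm.GetPaths(), err) // [paths] <nil>
}
```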
grpclog.Errorf("Failed to send response chunk: %v", err) + return + } + wroteHeader = true + if _, err := w.Write(delimiter); err != nil { + grpclog.Errorf("Failed to send delimiter chunk: %v", err) + return + } + err = rc.Flush() + if err != nil { + if errors.Is(err, http.ErrNotSupported) { + grpclog.Errorf("Flush not supported in %T", w) + http.Error(w, "unexpected type of web server", http.StatusInternalServerError) + return + } + grpclog.Errorf("Failed to flush response to client: %v", err) + return + } + } +} + +func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k, vs := range md.HeaderMD { + if h, ok := mux.outgoingHeaderMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } + } + } +} + +func handleForwardResponseTrailerHeader(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k := range md.TrailerMD { + if h, ok := mux.outgoingTrailerMatcher(k); ok { + w.Header().Add("Trailer", textproto.CanonicalMIMEHeaderKey(h)) + } + } +} + +func handleForwardResponseTrailer(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k, vs := range md.TrailerMD { + if h, ok := mux.outgoingTrailerMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } + } + } +} + +// responseBody interface contains method for getting field for marshaling to the response body +// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule` +type responseBody interface { + XXX_ResponseBody() interface{} +} + +// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. +func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + md, ok := ServerMetadataFromContext(ctx) + if ok { + handleForwardResponseServerMetadata(w, mux, md) + } + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. 
+ doForwardTrailers := requestAcceptsTrailers(req) + + if ok && doForwardTrailers { + handleForwardResponseTrailerHeader(w, mux, md) + w.Header().Set("Transfer-Encoding", "chunked") + } + + contentType := marshaler.ContentType(resp) + w.Header().Set("Content-Type", contentType) + + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + var buf []byte + if rb, ok := respRw.(responseBody); ok { + buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) + } else { + buf, err = marshaler.Marshal(respRw) + } + if err != nil { + grpclog.Errorf("Marshal error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + if !doForwardTrailers && mux.writeContentLength { + w.Header().Set("Content-Length", strconv.Itoa(len(buf))) + } + + if _, err = w.Write(buf); err != nil && !errors.Is(err, http.ErrBodyNotAllowed) { + grpclog.Errorf("Failed to write response: %v", err) + } + + if ok && doForwardTrailers { + handleForwardResponseTrailer(w, mux, md) + } +} + +func requestAcceptsTrailers(req *http.Request) bool { + te := req.Header.Get("TE") + return strings.Contains(strings.ToLower(te), "trailers") +} + +func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { + if len(opts) == 0 { + return nil + } + for _, opt := range opts { + if err := opt(ctx, w, resp); err != nil { + return fmt.Errorf("error handling ForwardResponseOptions: %w", err) + } + } + return nil +} + +func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + if !wroteHeader { + w.Header().Set("Content-Type", marshaler.ContentType(msg)) + w.WriteHeader(HTTPStatusFromCode(st.Code())) + } + buf, err := marshaler.Marshal(msg) + if err != nil { + grpclog.Errorf("Failed to marshal an error: %v", err) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to notify error to client: %v", err) + return + } + if _, err := w.Write(delimiter); err != nil { + grpclog.Errorf("Failed to send delimiter chunk: %v", err) + return + } +} + +func errorChunk(st *status.Status) map[string]proto.Message { + return map[string]proto.Message{"error": st.Proto()} +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go new file mode 100644 index 00000000000..6de2e220c7f --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go @@ -0,0 +1,32 @@ +package runtime + +import ( + "google.golang.org/genproto/googleapis/api/httpbody" +) + +// HTTPBodyMarshaler is a Marshaler which supports marshaling of a +// google.api.HttpBody message as the full response body if it is +// the actual message used as the response. If not, then this will +// simply fallback to the Marshaler specified as its default Marshaler. 
+type HTTPBodyMarshaler struct { + Marshaler +} + +// ContentType returns its specified content type in case v is a +// google.api.HttpBody message, otherwise it will fall back to the default Marshalers +// content type. +func (h *HTTPBodyMarshaler) ContentType(v interface{}) string { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.GetContentType() + } + return h.Marshaler.ContentType(v) +} + +// Marshal marshals "v" by returning the body bytes if v is a +// google.api.HttpBody message, otherwise it falls back to the default Marshaler. +func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.GetData(), nil + } + return h.Marshaler.Marshal(v) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go new file mode 100644 index 00000000000..fe52081ab94 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go @@ -0,0 +1,50 @@ +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON +// with the standard "encoding/json" package of Golang. +// Although it is generally faster for simple proto messages than JSONPb, +// it does not support advanced features of protobuf, e.g. map, oneof, .... +// +// The NewEncoder and NewDecoder types return *json.Encoder and +// *json.Decoder respectively. +type JSONBuiltin struct{} + +// ContentType always Returns "application/json". +func (*JSONBuiltin) ContentType(_ interface{}) string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// MarshalIndent is like Marshal but applies Indent to format the output +func (j *JSONBuiltin) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return json.MarshalIndent(v, prefix, indent) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder { + return json.NewDecoder(r) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. +func (j *JSONBuiltin) Delimiter() []byte { + return []byte("\n") +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go new file mode 100644 index 00000000000..3d07063007d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -0,0 +1,349 @@ +package runtime + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strconv" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +// JSONPb is a Marshaler which marshals/unmarshals into/from JSON +// with the "google.golang.org/protobuf/encoding/protojson" marshaler. +// It supports the full functionality of protobuf unlike JSONBuiltin. +// +// The NewDecoder method returns a DecoderWrapper, so the underlying +// *json.Decoder methods can be used. 
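`HTTPBodyMarshaler` is typically installed as the wildcard marshaler so that `google.api.HttpBody` responses bypass JSON encoding (raw bytes, with the body's own content type) while every other message falls through to the wrapped marshaler. A configuration sketch, assuming the `WithMarshalerOption` and `MIMEWildcard` symbols from the vendored marshaler_registry.go:

```go
package main

import (
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/encoding/protojson"
)

func main() {
	mux := runtime.NewServeMux(
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.HTTPBodyMarshaler{
			// Non-HttpBody messages fall back to protojson via JSONPb.
			Marshaler: &runtime.JSONPb{
				MarshalOptions: protojson.MarshalOptions{EmitUnpopulated: true},
			},
		}),
	)
	_ = mux // register generated handlers on mux as usual
}
```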
+type JSONPb struct { + protojson.MarshalOptions + protojson.UnmarshalOptions +} + +// ContentType always returns "application/json". +func (*JSONPb) ContentType(_ interface{}) string { + return "application/json" +} + +// Marshal marshals "v" into JSON. +func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + if err := j.marshalTo(&buf, v); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + buf, err := j.marshalNonProtoField(v) + if err != nil { + return err + } + if j.Indent != "" { + b := &bytes.Buffer{} + if err := json.Indent(b, buf, "", j.Indent); err != nil { + return err + } + buf = b.Bytes() + } + _, err = w.Write(buf) + return err + } + + b, err := j.MarshalOptions.Marshal(p) + if err != nil { + return err + } + + _, err = w.Write(b) + return err +} + +var ( + // protoMessageType is stored to prevent constant lookup of the same type at runtime. + protoMessageType = reflect.TypeFor[proto.Message]() +) + +// marshalNonProto marshals a non-message field of a protobuf message. +// This function does not correctly marshal arbitrary data structures into JSON, +// it is only capable of marshaling non-message field values of protobuf, +// i.e. primitive types, enums; pointers to primitives or enums; maps from +// integer/string types to primitives/enums/pointers to messages. +func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + if v == nil { + return []byte("null"), nil + } + rv := reflect.ValueOf(v) + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return []byte("null"), nil + } + rv = rv.Elem() + } + + if rv.Kind() == reflect.Slice { + if rv.IsNil() { + if j.EmitUnpopulated { + return []byte("[]"), nil + } + return []byte("null"), nil + } + + if rv.Type().Elem().Implements(protoMessageType) { + var buf bytes.Buffer + if err := buf.WriteByte('['); err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + if err := buf.WriteByte(','); err != nil { + return nil, err + } + } + if err := j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } + if err := buf.WriteByte(']'); err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + + if rv.Type().Elem().Implements(typeProtoEnum) { + var buf bytes.Buffer + if err := buf.WriteByte('['); err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + if err := buf.WriteByte(','); err != nil { + return nil, err + } + } + var err error + if j.UseEnumNumbers { + _, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10)) + } else { + _, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"") + } + if err != nil { + return nil, err + } + } + if err := buf.WriteByte(']'); err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + } + + if rv.Kind() == reflect.Map { + m := make(map[string]*json.RawMessage) + for _, k := range rv.MapKeys() { + buf, err := j.Marshal(rv.MapIndex(k).Interface()) + if err != nil { + return nil, err + } + m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) + } + return json.Marshal(m) + } + if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers { + return json.Marshal(enum.String()) + } + return json.Marshal(rv.Interface()) +} + +// Unmarshal unmarshals JSON "data" into "v" +func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { + return unmarshalJSONPb(data, j.UnmarshalOptions, v) 
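Because `JSONPb` embeds `protojson.MarshalOptions` and `protojson.UnmarshalOptions` directly, protojson behavior is configured with a plain struct literal. A short sketch; well-known wrapper types are used so the example needs no generated messages:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	j := &runtime.JSONPb{
		MarshalOptions:   protojson.MarshalOptions{Indent: "  "},
		UnmarshalOptions: protojson.UnmarshalOptions{DiscardUnknown: true},
	}

	b, _ := j.Marshal(wrapperspb.String("hi"))
	fmt.Println(string(b)) // "hi" (well-known wrappers collapse to their value)

	var v wrapperspb.StringValue
	_ = j.Unmarshal([]byte(`"bye"`), &v)
	fmt.Println(v.GetValue()) // bye
}
```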
+} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONPb) NewDecoder(r io.Reader) Decoder { + d := json.NewDecoder(r) + return DecoderWrapper{ + Decoder: d, + UnmarshalOptions: j.UnmarshalOptions, + } +} + +// DecoderWrapper is a wrapper around a *json.Decoder that adds +// support for protos to the Decode method. +type DecoderWrapper struct { + *json.Decoder + protojson.UnmarshalOptions +} + +// Decode wraps the embedded decoder's Decode method to support +// protos using a jsonpb.Unmarshaler. +func (d DecoderWrapper) Decode(v interface{}) error { + return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JSONPb) NewEncoder(w io.Writer) Encoder { + return EncoderFunc(func(v interface{}) error { + if err := j.marshalTo(w, v); err != nil { + return err + } + // mimic json.Encoder by adding a newline (makes output + // easier to read when it contains multiple encoded items) + _, err := w.Write(j.Delimiter()) + return err + }) +} + +func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error { + d := json.NewDecoder(bytes.NewReader(data)) + return decodeJSONPb(d, unmarshaler, v) +} + +func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + return decodeNonProtoField(d, unmarshaler, v) + } + + // Decode into bytes for marshalling + var b json.RawMessage + if err := d.Decode(&b); err != nil { + return err + } + + return unmarshaler.Unmarshal([]byte(b), p) +} + +func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", v) + } + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + if rv.Type().ConvertibleTo(typeProtoMessage) { + // Decode into bytes for marshalling + var b json.RawMessage + if err := d.Decode(&b); err != nil { + return err + } + + return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message)) + } + rv = rv.Elem() + } + if rv.Kind() == reflect.Map { + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + conv, ok := convFromType[rv.Type().Key().Kind()] + if !ok { + return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) + } + + m := make(map[string]*json.RawMessage) + if err := d.Decode(&m); err != nil { + return err + } + for k, v := range m { + result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + bk := result[0] + bv := reflect.New(rv.Type().Elem()) + if v == nil { + null := json.RawMessage("null") + v = &null + } + if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil { + return err + } + rv.SetMapIndex(bk, bv.Elem()) + } + return nil + } + if rv.Kind() == reflect.Slice { + if rv.Type().Elem().Kind() == reflect.Uint8 { + var sl []byte + if err := d.Decode(&sl); err != nil { + return err + } + if sl != nil { + rv.SetBytes(sl) + } + return nil + } + + var sl []json.RawMessage + if err := d.Decode(&sl); err != nil { + return err + } + if sl != nil { + rv.Set(reflect.MakeSlice(rv.Type(), 0, 0)) + } + for _, item := range sl { + bv := reflect.New(rv.Type().Elem()) + if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil { + return err + } + rv.Set(reflect.Append(rv, bv.Elem())) + } + return nil + } + if _, 
ok := rv.Interface().(protoEnum); ok {
+		var repr interface{}
+		if err := d.Decode(&repr); err != nil {
+			return err
+		}
+		switch v := repr.(type) {
+		case string:
+			// TODO(yugui) Should use proto.StructProperties?
+			return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
+		case float64:
+			rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type()))
+			return nil
+		default:
+			return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
+		}
+	}
+	return d.Decode(v)
+}
+
+type protoEnum interface {
+	fmt.Stringer
+	EnumDescriptor() ([]byte, []int)
+}
+
+var typeProtoEnum = reflect.TypeFor[protoEnum]()
+
+var typeProtoMessage = reflect.TypeFor[proto.Message]()
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONPb) Delimiter() []byte {
+	return []byte("\n")
+}
+
+var (
+	convFromType = map[reflect.Kind]reflect.Value{
+		reflect.String:  reflect.ValueOf(String),
+		reflect.Bool:    reflect.ValueOf(Bool),
+		reflect.Float64: reflect.ValueOf(Float64),
+		reflect.Float32: reflect.ValueOf(Float32),
+		reflect.Int64:   reflect.ValueOf(Int64),
+		reflect.Int32:   reflect.ValueOf(Int32),
+		reflect.Uint64:  reflect.ValueOf(Uint64),
+		reflect.Uint32:  reflect.ValueOf(Uint32),
+		reflect.Slice:   reflect.ValueOf(Bytes),
+	}
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
new file mode 100644
index 00000000000..398c780dc22
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
@@ -0,0 +1,60 @@
+package runtime
+
+import (
+	"errors"
+	"io"
+
+	"google.golang.org/protobuf/proto"
+)
+
+// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes
+type ProtoMarshaller struct{}
+
+// ContentType always returns "application/octet-stream".
+func (*ProtoMarshaller) ContentType(_ interface{}) string {
+	return "application/octet-stream"
+}
+
+// Marshal marshals "value" into Proto
+func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return nil, errors.New("unable to marshal non proto field")
+	}
+	return proto.Marshal(message)
+}
+
+// Unmarshal unmarshals proto "data" into "value"
+func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return errors.New("unable to unmarshal non proto field")
+	}
+	return proto.Unmarshal(data, message)
+}
+
+// NewDecoder returns a Decoder which reads proto stream from "reader".
+func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
+	return DecoderFunc(func(value interface{}) error {
+		buffer, err := io.ReadAll(reader)
+		if err != nil {
+			return err
+		}
+		return marshaller.Unmarshal(buffer, value)
+	})
+}
+
+// NewEncoder returns an Encoder which writes proto stream into "writer".
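+//
+// Editor's note: an illustrative round trip (a sketch, not part of the
+// upstream file; "msg" stands for any proto.Message value):
+//
+//	var m ProtoMarshaller
+//	var buf bytes.Buffer
+//	_ = m.NewEncoder(&buf).Encode(msg)    // proto.Marshal under the hood
+//	err := m.NewDecoder(&buf).Decode(msg) // io.ReadAll followed by proto.Unmarshal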
+func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
+	return EncoderFunc(func(value interface{}) error {
+		buffer, err := marshaller.Marshal(value)
+		if err != nil {
+			return err
+		}
+		if _, err := writer.Write(buffer); err != nil {
+			return err
+		}
+
+		return nil
+	})
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
new file mode 100644
index 00000000000..b1dfc37af9b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
@@ -0,0 +1,58 @@
+package runtime
+
+import (
+	"io"
+)
+
+// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
+type Marshaler interface {
+	// Marshal marshals "v" into byte sequence.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal unmarshals "data" into "v".
+	// "v" must be a pointer value.
+	Unmarshal(data []byte, v interface{}) error
+	// NewDecoder returns a Decoder which reads byte sequence from "r".
+	NewDecoder(r io.Reader) Decoder
+	// NewEncoder returns an Encoder which writes byte sequence into "w".
+	NewEncoder(w io.Writer) Encoder
+	// ContentType returns the Content-Type which this marshaler is responsible for.
+	// The parameter describes the type which is being marshalled, which can sometimes
+	// affect the content type returned.
+	ContentType(v interface{}) string
+}
+
+// Decoder decodes a byte sequence.
+type Decoder interface {
+	Decode(v interface{}) error
+}
+
+// Encoder encodes gRPC payloads / fields into byte sequence.
+type Encoder interface {
+	Encode(v interface{}) error
+}
+
+// DecoderFunc adapts a decoder function into Decoder.
+type DecoderFunc func(v interface{}) error
+
+// Decode delegates invocations to the underlying function itself.
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
+
+// EncoderFunc adapts an encoder function into Encoder.
+type EncoderFunc func(v interface{}) error
+
+// Encode delegates invocations to the underlying function itself.
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
+
+// Delimited defines the streaming delimiter.
+type Delimited interface {
+	// Delimiter returns the record separator for the stream.
+	Delimiter() []byte
+}
+
+// StreamContentType defines the streaming content type.
+type StreamContentType interface {
+	// StreamContentType returns the content type for a stream. This shares the
+	// same behaviour as for `Marshaler.ContentType`, but is called, if present,
+	// in the case of a streamed response.
+	StreamContentType(v interface{}) string
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
new file mode 100644
index 00000000000..07c28112c89
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
@@ -0,0 +1,109 @@
+package runtime
+
+import (
+	"errors"
+	"mime"
+	"net/http"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/protobuf/encoding/protojson"
+)
+
+// MIMEWildcard is the fallback MIME type used for requests which do not match
+// a registered MIME type.
+const MIMEWildcard = "*"
+
+var (
+	acceptHeader      = http.CanonicalHeaderKey("Accept")
+	contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
+
+	defaultMarshaler = &HTTPBodyMarshaler{
+		Marshaler: &JSONPb{
+			MarshalOptions: protojson.MarshalOptions{
+				EmitUnpopulated: true,
+			},
+			UnmarshalOptions: protojson.UnmarshalOptions{
+				DiscardUnknown: true,
+			},
+		},
+	}
+)
+
+// MarshalerForRequest returns the inbound/outbound marshalers for this request.
+// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
+// If it isn't set (or the request Content-Type is empty), it checks for "*".
+// If there are multiple Content-Type headers set, it chooses the first one that it can
+// exactly match in the registry.
+// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
+	for _, acceptVal := range r.Header[acceptHeader] {
+		if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
+			outbound = m
+			break
+		}
+	}
+
+	for _, contentTypeVal := range r.Header[contentTypeHeader] {
+		contentType, _, err := mime.ParseMediaType(contentTypeVal)
+		if err != nil {
+			grpclog.Errorf("Failed to parse Content-Type %s: %v", contentTypeVal, err)
+			continue
+		}
+		if m, ok := mux.marshalers.mimeMap[contentType]; ok {
+			inbound = m
+			break
+		}
+	}
+
+	if inbound == nil {
+		inbound = mux.marshalers.mimeMap[MIMEWildcard]
+	}
+	if outbound == nil {
+		outbound = inbound
+	}
+
+	return inbound, outbound
+}
+
+// marshalerRegistry is a mapping from MIME types to Marshalers.
+type marshalerRegistry struct {
+	mimeMap map[string]Marshaler
+}
+
+// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
+// MIME type).
+func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
+	if len(mime) == 0 {
+		return errors.New("empty MIME type")
+	}
+
+	m.mimeMap[mime] = marshaler
+
+	return nil
+}
+
+// makeMarshalerMIMERegistry returns a new registry of marshalers.
+// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
+//
+// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
+// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
+// with an "application/json" Content-Type.
+// "*" can be used to match any Content-Type.
+// This can be attached to a ServeMux with the marshaler option.
+func makeMarshalerMIMERegistry() marshalerRegistry {
+	return marshalerRegistry{
+		mimeMap: map[string]Marshaler{
+			MIMEWildcard: defaultMarshaler,
+		},
+	}
+}
+
+// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
+// Marshalers to a MIME type in mux.
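+//
+// Illustrative use (editor's sketch, not part of the upstream file): register
+// the binary ProtoMarshaller for "application/octet-stream" while keeping the
+// JSON default for every other Content-Type:
+//
+//	mux := NewServeMux(
+//		WithMarshalerOption("application/octet-stream", &ProtoMarshaller{}),
+//	)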
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
+	return func(mux *ServeMux) {
+		if err := mux.marshalers.add(mime, marshaler); err != nil {
+			panic(err)
+		}
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
new file mode 100644
index 00000000000..3eb16167173
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -0,0 +1,553 @@
+package runtime
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/textproto"
+	"regexp"
+	"strings"
+
+	"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/health/grpc_health_v1"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+)
+
+// UnescapingMode defines the behavior of ServeMux when unescaping path parameters.
+type UnescapingMode int
+
+const (
+	// UnescapingModeLegacy is the default V2 behavior, which unescapes the entire
+	// path string before doing any routing.
+	UnescapingModeLegacy UnescapingMode = iota
+
+	// UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
+	// reserved characters.
+	UnescapingModeAllExceptReserved
+
+	// UnescapingModeAllExceptSlash unescapes URL path parameters except path
+	// separators, which will be left as "%2F".
+	UnescapingModeAllExceptSlash
+
+	// UnescapingModeAllCharacters unescapes all URL path parameters.
+	UnescapingModeAllCharacters
+
+	// UnescapingModeDefault is the default escaping type.
+	// TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's
+	// reference implementation
+	UnescapingModeDefault = UnescapingModeLegacy
+)
+
+var encodedPathSplitter = regexp.MustCompile("(/|%2F)")
+
+// A HandlerFunc handles a specific pair of path pattern and HTTP method.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
+
+// A Middleware handler wraps another HandlerFunc to do some pre- and/or post-processing
+// of the request. This is used as an alternative to gRPC interceptors when using the
+// direct-to-implementation registration methods. It is generally recommended to use gRPC
+// client or server interceptors instead where possible.
+type Middleware func(HandlerFunc) HandlerFunc
+
+// ServeMux is a request multiplexer for grpc-gateway.
+// It matches HTTP requests to patterns and invokes the corresponding handler.
+type ServeMux struct {
+	// handlers maps HTTP method to a list of handlers.
+	handlers map[string][]handler
+	middlewares []Middleware
+	forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
+	forwardResponseRewriter ForwardResponseRewriter
+	marshalers marshalerRegistry
+	incomingHeaderMatcher HeaderMatcherFunc
+	outgoingHeaderMatcher HeaderMatcherFunc
+	outgoingTrailerMatcher HeaderMatcherFunc
+	metadataAnnotators []func(context.Context, *http.Request) metadata.MD
+	errorHandler ErrorHandlerFunc
+	streamErrorHandler StreamErrorHandlerFunc
+	routingErrorHandler RoutingErrorHandlerFunc
+	disablePathLengthFallback bool
+	unescapingMode UnescapingMode
+	writeContentLength bool
+}
+
+// ServeMuxOption is an option that can be given to a ServeMux on construction.
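+//
+// Options are applied in order by NewServeMux, e.g. (editor's sketch):
+//
+//	mux := NewServeMux(
+//		WithUnescapingMode(UnescapingModeAllExceptReserved),
+//		WithWriteContentLength(),
+//	)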
+type ServeMuxOption func(*ServeMux)
+
+// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages
+// before they are forwarded in a unary, stream, or error response.
+type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error)
+
+// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic
+// that can rewrite the final response before it is forwarded.
+//
+// The response rewriter function is called during unary message forwarding, stream message
+// forwarding and when errors are being forwarded.
+//
+// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2` incorrect,
+// since this option involves making runtime changes to the response shape or type.
+func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption {
+	return func(sm *ServeMux) {
+		sm.forwardResponseRewriter = fwdResponseRewriter
+	}
+}
+
+// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
+//
+// forwardResponseOption is an option that will be called on the relevant context.Context,
+// http.ResponseWriter, and proto.Message before every forwarded response.
+//
+// The message may be nil in the case where just a header is being sent.
+func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
+	}
+}
+
+// WithUnescapingMode sets the escaping type. See the definitions of UnescapingMode
+// for more information.
+func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.unescapingMode = mode
+	}
+}
+
+// WithMiddlewares sets server middleware for all handlers. This is useful as an alternative
+// to gRPC interceptors when using the direct-to-implementation registration methods, which
+// cannot rely on gRPC interceptors. It's recommended to use gRPC interceptors instead if possible.
+func WithMiddlewares(middlewares ...Middleware) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.middlewares = append(serveMux.middlewares, middlewares...)
+	}
+}
+
+// SetQueryParameterParser sets the query parameter parser, used to populate a message from query parameters.
+// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be
+// done with careful consideration.
+func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		currentQueryParser = queryParameterParser
+	}
+}
+
+// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
+type HeaderMatcherFunc func(string) (string, bool)
+
+// DefaultHeaderMatcher is used to pass HTTP request headers to/from gRPC context. This adds permanent HTTP header
+// keys (as specified by the IANA, e.g. Accept, Cookie, Host) to the gRPC metadata with the grpcgateway- prefix. If you
+// want to know which headers are considered permanent, you can view the isPermanentHTTPHeader function.
+// HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'.
+// Other headers are not added to the gRPC metadata.
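+//
+// For example (editor's sketch): "Grpc-Metadata-Foo" is forwarded as metadata
+// key "Foo", while the permanent header "Accept" gains the grpcgateway- prefix:
+//
+//	key, ok := DefaultHeaderMatcher("Grpc-Metadata-Foo") // "Foo", true
+//	key, ok = DefaultHeaderMatcher("Accept")             // "grpcgateway-Accept", true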
+func DefaultHeaderMatcher(key string) (string, bool) {
+	switch key = textproto.CanonicalMIMEHeaderKey(key); {
+	case isPermanentHTTPHeader(key):
+		return MetadataPrefix + key, true
+	case strings.HasPrefix(key, MetadataHeaderPrefix):
+		return key[len(MetadataHeaderPrefix):], true
+	}
+	return "", false
+}
+
+func defaultOutgoingHeaderMatcher(key string) (string, bool) {
+	return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
+}
+
+func defaultOutgoingTrailerMatcher(key string) (string, bool) {
+	return fmt.Sprintf("%s%s", MetadataTrailerPrefix, key), true
+}
+
+// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming requests to the gateway.
+//
+// This matcher will be called with each header in http.Request. If the matcher returns true, that header will be
+// passed to the gRPC context. To transform the header before passing it to the gRPC context, the matcher should
+// return the modified header.
+func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	for _, header := range fn.matchedMalformedHeaders() {
+		grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
+	}
+
+	return func(mux *ServeMux) {
+		mux.incomingHeaderMatcher = fn
+	}
+}
+
+// matchedMalformedHeaders returns the malformed headers that would be forwarded to the gRPC server.
+func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
+	if fn == nil {
+		return nil
+	}
+	headers := make([]string, 0)
+	for header := range malformedHTTPHeaders {
+		out, accept := fn(header)
+		if accept && isMalformedHTTPHeader(out) {
+			headers = append(headers, out)
+		}
+	}
+	return headers
+}
+
+// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing responses from the gateway.
+//
+// This matcher will be called with each header in the response header metadata. If the matcher returns true, that header
+// will be passed to the HTTP response returned from the gateway. To transform the header before passing it to the
+// response, the matcher should return the modified header.
+func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.outgoingHeaderMatcher = fn
+	}
+}
+
+// WithOutgoingTrailerMatcher returns a ServeMuxOption representing a headerMatcher for outgoing responses from the gateway.
+//
+// This matcher will be called with each header in the response trailer metadata. If the matcher returns true, that header
+// will be passed to the HTTP response returned from the gateway. To transform the header before passing it to the
+// response, the matcher should return the modified header.
+func WithOutgoingTrailerMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.outgoingTrailerMatcher = fn
+	}
+}
+
+// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
+//
+// This can be used by services that need to read from http.Request and modify the gRPC context. A common use case
+// is reading a token from a cookie and adding it to the gRPC context.
+func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
+	}
+}
+
+// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler.
+//
+// This can be used to configure a custom error response.
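+//
+// Editor's sketch (the handler signature mirrors the s.errorHandler calls in
+// ServeHTTP below; DefaultHTTPErrorHandler is the default set by NewServeMux):
+//
+//	mux := NewServeMux(
+//		WithErrorHandler(func(ctx context.Context, mux *ServeMux, m Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+//			w.Header().Set("X-Gateway-Error", "true")
+//			DefaultHTTPErrorHandler(ctx, mux, m, w, r, err)
+//		}),
+//	)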
+func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.errorHandler = fn
+	}
+}
+
+// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
+// error handler, which allows for customizing the error trailer for server-streaming
+// calls.
+//
+// For stream errors that occur before any response has been written, the mux's
+// ErrorHandler will be invoked. However, once data has been written, the errors must
+// be handled differently: they must be included in the response body. The response body's
+// final message will include the error details returned by the stream error handler.
+func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.streamErrorHandler = fn
+	}
+}
+
+// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle HTTP routing errors.
+//
+// The handler is called for errors which can happen before a gRPC route is selected or executed,
+// i.e. for the following status codes: StatusMethodNotAllowed, StatusNotFound, and StatusBadRequest.
+func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.routingErrorHandler = fn
+	}
+}
+
+// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
+func WithDisablePathLengthFallback() ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.disablePathLengthFallback = true
+	}
+}
+
+// WithWriteContentLength returns a ServeMuxOption to enable writing a Content-Length header on non-streaming responses.
+func WithWriteContentLength() ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.writeContentLength = true
+	}
+}
+
+// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath.
+// When called, the handler will forward the request to the upstream grpc service health check (defined in the
+// gRPC Health Checking Protocol).
+//
+// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how
+// to set up the protocol in the grpc server.
+//
+// If you define a service as a query parameter, it will also be forwarded as the service in the HealthCheckRequest.
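+//
+// Editor's sketch, assuming an existing *grpc.ClientConn "conn" to the
+// upstream server:
+//
+//	mux := NewServeMux(
+//		WithHealthEndpointAt(grpc_health_v1.NewHealthClient(conn), "/hc"),
+//	)
+//
+// A GET on "/hc?service=foo" then issues HealthCheckRequest{Service: "foo"}.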
+func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption { + return func(s *ServeMux) { + // error can be ignored since pattern is definitely valid + _ = s.HandlePath( + http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string, + ) { + _, outboundMarshaler := MarshalerForRequest(s, r) + annotatedContext, err := AnnotateContext(r.Context(), s, r, grpc_health_v1.Health_Check_FullMethodName, WithHTTPPathPattern(endpointPath)) + if err != nil { + s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + return + } + + var md ServerMetadata + resp, err := healthCheckClient.Check(annotatedContext, &grpc_health_v1.HealthCheckRequest{ + Service: r.URL.Query().Get("service"), + }, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD)) + annotatedContext = NewServerMetadataContext(annotatedContext, md) + if err != nil { + s.errorHandler(annotatedContext, s, outboundMarshaler, w, r, err) + return + } + + w.Header().Set("Content-Type", "application/json") + + if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING { + switch resp.GetStatus() { + case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN: + err = status.Error(codes.Unavailable, resp.String()) + case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN: + err = status.Error(codes.NotFound, resp.String()) + } + + s.errorHandler(annotatedContext, s, outboundMarshaler, w, r, err) + return + } + + _ = outboundMarshaler.NewEncoder(w).Encode(resp) + }) + } +} + +// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux. +// +// See WithHealthEndpointAt for the general implementation. +func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption { + return WithHealthEndpointAt(healthCheckClient, "/healthz") +} + +// NewServeMux returns a new ServeMux whose internal mapping is empty. +func NewServeMux(opts ...ServeMuxOption) *ServeMux { + serveMux := &ServeMux{ + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil }, + marshalers: makeMarshalerMIMERegistry(), + errorHandler: DefaultHTTPErrorHandler, + streamErrorHandler: DefaultStreamErrorHandler, + routingErrorHandler: DefaultRoutingErrorHandler, + unescapingMode: UnescapingModeDefault, + } + + for _, opt := range opts { + opt(serveMux) + } + + if serveMux.incomingHeaderMatcher == nil { + serveMux.incomingHeaderMatcher = DefaultHeaderMatcher + } + if serveMux.outgoingHeaderMatcher == nil { + serveMux.outgoingHeaderMatcher = defaultOutgoingHeaderMatcher + } + if serveMux.outgoingTrailerMatcher == nil { + serveMux.outgoingTrailerMatcher = defaultOutgoingTrailerMatcher + } + + return serveMux +} + +// Handle associates "h" to the pair of HTTP method and path pattern. +func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if len(s.middlewares) > 0 { + h = chainMiddlewares(s.middlewares)(h) + } + s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...) +} + +// HandlePath allows users to configure custom path handlers. 
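+// For example (editor's sketch):
+//
+//	_ = mux.HandlePath(http.MethodGet, "/hello", func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+//		_, _ = w.Write([]byte("hello"))
+//	})
+//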
+// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/
+func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error {
+	compiler, err := httprule.Parse(pathPattern)
+	if err != nil {
+		return fmt.Errorf("parsing path pattern: %w", err)
+	}
+	tp := compiler.Compile()
+	pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb)
+	if err != nil {
+		return fmt.Errorf("creating new pattern: %w", err)
+	}
+	s.Handle(meth, pattern, h)
+	return nil
+}
+
+// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path.
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
+	path := r.URL.Path
+	if !strings.HasPrefix(path, "/") {
+		_, outboundMarshaler := MarshalerForRequest(s, r)
+		s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest)
+		return
+	}
+
+	// TODO(v3): remove UnescapingModeLegacy
+	if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" {
+		path = r.URL.RawPath
+	}
+
+	if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
+		if err := r.ParseForm(); err != nil {
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			sterr := status.Error(codes.InvalidArgument, err.Error())
+			s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+			return
+		}
+		r.Method = strings.ToUpper(override)
+	}
+
+	var pathComponents []string
+	// In UnescapingModeLegacy the URL will already have been fully unescaped, so if we
+	// also split on "%2F" in that mode we would be double unescaping. In
+	// UnescapingModeAllCharacters we still split on "%2F", as the path here is the
+	// RawPath (i.e. still escaped). That does mean the default behavior of this function
+	// will change when UnescapingModeDefault is changed from UnescapingModeLegacy to
+	// UnescapingModeAllExceptReserved.
+	if s.unescapingMode == UnescapingModeAllCharacters {
+		pathComponents = encodedPathSplitter.Split(path[1:], -1)
+	} else {
+		pathComponents = strings.Split(path[1:], "/")
+	}
+
+	lastPathComponent := pathComponents[len(pathComponents)-1]
+
+	for _, h := range s.handlers[r.Method] {
+		// If the pattern has a verb, explicitly look for a suffix in the last
+		// component that matches a colon plus the verb. This allows us to
+		// handle some cases that otherwise can't be correctly handled by the
+		// former LastIndex case, such as when the verb literal itself contains
+		// a colon. This should work for all cases that have run through the
+		// parser because we know what verb we're looking for, however, there
+		// are still some cases that the parser itself cannot disambiguate. See
+		// the comment there if interested.
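+		//
+		// For example (editor's note): with pattern verb "watch" and request
+		// path "/v1/streams/abc:watch", lastPathComponent is "abc:watch"; idx
+		// below then points at the ':' and the component splits into "abc"
+		// plus verb "watch".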
+
+		var verb string
+		patVerb := h.pat.Verb()
+
+		idx := -1
+		if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+			idx = len(lastPathComponent) - len(patVerb) - 1
+		}
+		if idx == 0 {
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
+			return
+		}
+
+		comps := make([]string, len(pathComponents))
+		copy(comps, pathComponents)
+
+		if idx > 0 {
+			comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
+		}
+
+		pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
+		if err != nil {
+			var mse MalformedSequenceError
+			if ok := errors.As(err, &mse); ok {
+				_, outboundMarshaler := MarshalerForRequest(s, r)
+				s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
+					HTTPStatus: http.StatusBadRequest,
+					Err:        mse,
+				})
+			}
+			continue
+		}
+		s.handleHandler(h, w, r, pathParams)
+		return
+	}
+
+	// if no handler was found for the request, look for other methods
+	// to handle the POST -> GET fallback if the request is subject to path
+	// length fallback.
+	// Note we are not eagerly checking the request here as we want to return the
+	// right HTTP status code, and we need to process the fallback candidates in
+	// order to do that.
+	for m, handlers := range s.handlers {
+		if m == r.Method {
+			continue
+		}
+		for _, h := range handlers {
+			var verb string
+			patVerb := h.pat.Verb()
+
+			idx := -1
+			if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+				idx = len(lastPathComponent) - len(patVerb) - 1
+			}
+
+			comps := make([]string, len(pathComponents))
+			copy(comps, pathComponents)
+
+			if idx > 0 {
+				comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
+			}
+
+			pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
+			if err != nil {
+				var mse MalformedSequenceError
+				if ok := errors.As(err, &mse); ok {
+					_, outboundMarshaler := MarshalerForRequest(s, r)
+					s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
+						HTTPStatus: http.StatusBadRequest,
+						Err:        mse,
+					})
+				}
+				continue
+			}
+
+			// X-HTTP-Method-Override is optional. Always allow fallback to POST.
+			// Also, only consider POST -> GET fallbacks, and avoid falling back to
+			// potentially dangerous operations like DELETE.
+			if s.isPathLengthFallback(r) && m == http.MethodGet {
+				if err := r.ParseForm(); err != nil {
+					_, outboundMarshaler := MarshalerForRequest(s, r)
+					sterr := status.Error(codes.InvalidArgument, err.Error())
+					s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+					return
+				}
+				s.handleHandler(h, w, r, pathParams)
+				return
+			}
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed)
+			return
+		}
+	}
+
+	_, outboundMarshaler := MarshalerForRequest(s, r)
+	s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
+}
+
+// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
+	return s.forwardResponseOptions
+}
+
+func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
+	return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
+}
+
+type handler struct {
+	pat Pattern
+	h   HandlerFunc
+}
+
+func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+	h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams)
+}
+
+func chainMiddlewares(mws []Middleware) Middleware {
+	return func(next HandlerFunc) HandlerFunc {
+		for i := len(mws); i > 0; i-- {
+			next = mws[i-1](next)
+		}
+		return next
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
new file mode 100644
index 00000000000..e54507145b6
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
@@ -0,0 +1,381 @@
+package runtime
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+	"google.golang.org/grpc/grpclog"
+)
+
+var (
+	// ErrNotMatch indicates that the given HTTP request path does not match the pattern.
+	ErrNotMatch = errors.New("not match to the path pattern")
+	// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
+	ErrInvalidPattern = errors.New("invalid pattern")
+)
+
+type MalformedSequenceError string
+
+func (e MalformedSequenceError) Error() string {
+	return "malformed path escape " + strconv.Quote(string(e))
+}
+
+type op struct {
+	code    utilities.OpCode
+	operand int
+}
+
+// Pattern is a template pattern of HTTP request paths defined in
+// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto
+type Pattern struct {
+	// ops is a list of operations
+	ops []op
+	// pool is a constant pool indexed by the operands or vars.
+	pool []string
+	// vars is a list of variable names to be bound by this pattern
+	vars []string
+	// stacksize is the max depth of the stack
+	stacksize int
+	// tailLen is the length of the fixed-size segments after a deep wildcard
+	tailLen int
+	// verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
+	verb string
+}
+
+// NewPattern returns a new Pattern from the given definition values.
+// "ops" is a sequence of op codes. "pool" is a constant pool.
+// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
+// "version" must be 1 for now.
+// It returns an error if the given definition is invalid.
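+//
+// For example (editor's sketch, using the opcode constants from the utilities
+// package vendored below; the exact compiled form comes from the httprule
+// compiler): the template "/v1/{name}" corresponds roughly to
+//
+//	ops  = []int{int(utilities.OpLitPush), 0, int(utilities.OpPush), 0, int(utilities.OpConcatN), 1, int(utilities.OpCapture), 1}
+//	pool = []string{"v1", "name"}
+//	p, err := NewPattern(1, ops, pool, "")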
+func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
+	if version != 1 {
+		grpclog.Errorf("unsupported version: %d", version)
+		return Pattern{}, ErrInvalidPattern
+	}
+
+	l := len(ops)
+	if l%2 != 0 {
+		grpclog.Errorf("odd number of op codes: %d", l)
+		return Pattern{}, ErrInvalidPattern
+	}
+
+	var (
+		typedOps        []op
+		stack, maxstack int
+		tailLen         int
+		pushMSeen       bool
+		vars            []string
+	)
+	for i := 0; i < l; i += 2 {
+		op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush:
+			if pushMSeen {
+				tailLen++
+			}
+			stack++
+		case utilities.OpPushM:
+			if pushMSeen {
+				grpclog.Error("pushM appears twice")
+				return Pattern{}, ErrInvalidPattern
+			}
+			pushMSeen = true
+			stack++
+		case utilities.OpLitPush:
+			if op.operand < 0 || len(pool) <= op.operand {
+				grpclog.Errorf("literal index out of bound: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			if pushMSeen {
+				tailLen++
+			}
+			stack++
+		case utilities.OpConcatN:
+			if op.operand <= 0 {
+				grpclog.Errorf("non-positive concat size: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			stack -= op.operand
+			if stack < 0 {
+				grpclog.Error("stack underflow")
+				return Pattern{}, ErrInvalidPattern
+			}
+			stack++
+		case utilities.OpCapture:
+			if op.operand < 0 || len(pool) <= op.operand {
+				grpclog.Errorf("variable name index out of bound: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			v := pool[op.operand]
+			op.operand = len(vars)
+			vars = append(vars, v)
+			stack--
+			if stack < 0 {
+				grpclog.Error("stack underflow")
+				return Pattern{}, ErrInvalidPattern
+			}
+		default:
+			grpclog.Errorf("invalid opcode: %d", op.code)
+			return Pattern{}, ErrInvalidPattern
+		}
+
+		if maxstack < stack {
+			maxstack = stack
+		}
+		typedOps = append(typedOps, op)
+	}
+	return Pattern{
+		ops:       typedOps,
+		pool:      pool,
+		vars:      vars,
+		stacksize: maxstack,
+		tailLen:   tailLen,
+		verb:      verb,
+	}, nil
+}
+
+// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
+func MustPattern(p Pattern, err error) Pattern {
+	if err != nil {
+		grpclog.Fatalf("Pattern initialization failed: %v", err)
+	}
+	return p
+}
+
+// MatchAndEscape examines components to determine if they match a Pattern.
+// MatchAndEscape will return an error if no Patterns matched or if a pattern
+// matched but contained malformed escape sequences. If successful, the function
+// returns a mapping from field paths to their captured values.
+func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) {
+	if p.verb != verb {
+		if p.verb != "" {
+			return nil, ErrNotMatch
+		}
+		if len(components) == 0 {
+			components = []string{":" + verb}
+		} else {
+			components = append([]string{}, components...)
+			components[len(components)-1] += ":" + verb
+		}
+	}
+
+	var pos int
+	stack := make([]string, 0, p.stacksize)
+	captured := make([]string, len(p.vars))
+	l := len(components)
+	for _, op := range p.ops {
+		var err error
+
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush, utilities.OpLitPush:
+			if pos >= l {
+				return nil, ErrNotMatch
+			}
+			c := components[pos]
+			if op.code == utilities.OpLitPush {
+				if lit := p.pool[op.operand]; c != lit {
+					return nil, ErrNotMatch
+				}
+			} else if op.code == utilities.OpPush {
+				if c, err = unescape(c, unescapingMode, false); err != nil {
+					return nil, err
+				}
+			}
+			stack = append(stack, c)
+			pos++
+		case utilities.OpPushM:
+			end := len(components)
+			if end < pos+p.tailLen {
+				return nil, ErrNotMatch
+			}
+			end -= p.tailLen
+			c := strings.Join(components[pos:end], "/")
+			if c, err = unescape(c, unescapingMode, true); err != nil {
+				return nil, err
+			}
+			stack = append(stack, c)
+			pos = end
+		case utilities.OpConcatN:
+			n := op.operand
+			l := len(stack) - n
+			stack = append(stack[:l], strings.Join(stack[l:], "/"))
+		case utilities.OpCapture:
+			n := len(stack) - 1
+			captured[op.operand] = stack[n]
+			stack = stack[:n]
+		}
+	}
+	if pos < l {
+		return nil, ErrNotMatch
+	}
+	bindings := make(map[string]string)
+	for i, val := range captured {
+		bindings[p.vars[i]] = val
+	}
+	return bindings, nil
+}
+
+// Match examines components to determine if they match a Pattern.
+// It will never perform per-component unescaping (see: UnescapingModeLegacy).
+// Match will return an error if no Patterns matched. If successful,
+// the function returns a mapping from field paths to their captured values.
+//
+// Deprecated: Use MatchAndEscape.
+func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
+	return p.MatchAndEscape(components, verb, UnescapingModeDefault)
+}
+
+// Verb returns the verb part of the Pattern.
+func (p Pattern) Verb() string { return p.verb }
+
+func (p Pattern) String() string {
+	var stack []string
+	for _, op := range p.ops {
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush:
+			stack = append(stack, "*")
+		case utilities.OpLitPush:
+			stack = append(stack, p.pool[op.operand])
+		case utilities.OpPushM:
+			stack = append(stack, "**")
+		case utilities.OpConcatN:
+			n := op.operand
+			l := len(stack) - n
+			stack = append(stack[:l], strings.Join(stack[l:], "/"))
+		case utilities.OpCapture:
+			n := len(stack) - 1
+			stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
+		}
+	}
+	segs := strings.Join(stack, "/")
+	if p.verb != "" {
+		return fmt.Sprintf("/%s:%s", segs, p.verb)
+	}
+	return "/" + segs
+}
+
+/*
+ * The following code is adopted and modified from Go's standard library
+ * and carries the attached license.
+ *
+ * Copyright 2009 The Go Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style
+ * license that can be found in the LICENSE file.
+ */
+
+// ishex returns whether or not the given byte is a valid hex character
+func ishex(c byte) bool {
+	switch {
+	case '0' <= c && c <= '9':
+		return true
+	case 'a' <= c && c <= 'f':
+		return true
+	case 'A' <= c && c <= 'F':
+		return true
+	}
+	return false
+}
+
+func isRFC6570Reserved(c byte) bool {
+	switch c {
+	case '!', '#', '$', '&', '\'', '(', ')', '*',
+		'+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
+		return true
+	default:
+		return false
+	}
+}
+
+// unhex converts a hex character to its numeric value
+func unhex(c byte) byte {
+	switch {
+	case '0' <= c && c <= '9':
+		return c - '0'
+	case 'a' <= c && c <= 'f':
+		return c - 'a' + 10
+	case 'A' <= c && c <= 'F':
+		return c - 'A' + 10
+	}
+	return 0
+}
+
+// shouldUnescapeWithMode returns true if the character is escapable with the
+// given mode
+func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool {
+	switch mode {
+	case UnescapingModeAllExceptReserved:
+		if isRFC6570Reserved(c) {
+			return false
+		}
+	case UnescapingModeAllExceptSlash:
+		if c == '/' {
+			return false
+		}
+	case UnescapingModeAllCharacters:
+		return true
+	}
+	return true
+}
+
+// unescape unescapes a path string using the provided mode
+func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) {
+	// TODO(v3): remove UnescapingModeLegacy
+	if mode == UnescapingModeLegacy {
+		return s, nil
+	}
+
+	if !multisegment {
+		mode = UnescapingModeAllCharacters
+	}
+
+	// Count %, check that they're well-formed.
+	n := 0
+	for i := 0; i < len(s); {
+		if s[i] == '%' {
+			n++
+			if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
+				s = s[i:]
+				if len(s) > 3 {
+					s = s[:3]
+				}
+
+				return "", MalformedSequenceError(s)
+			}
+			i += 3
+		} else {
+			i++
+		}
+	}
+
+	if n == 0 {
+		return s, nil
+	}
+
+	var t strings.Builder
+	t.Grow(len(s))
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '%':
+			c := unhex(s[i+1])<<4 | unhex(s[i+2])
+			if shouldUnescapeWithMode(c, mode) {
+				t.WriteByte(c)
+				i += 2
+				continue
+			}
+			fallthrough
+		default:
+			t.WriteByte(s[i])
+		}
+	}
+
+	return t.String(), nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
new file mode 100644
index 00000000000..f710036b350
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
@@ -0,0 +1,80 @@
+package runtime
+
+import (
+	"google.golang.org/protobuf/proto"
+)
+
+// StringP returns a pointer to a string whose pointee is the same as the given string value.
+func StringP(val string) (*string, error) {
+	return proto.String(val), nil
+}
+
+// BoolP parses the given string representation of a boolean value,
+// and returns a pointer to a bool whose value is the same as the parsed value.
+func BoolP(val string) (*bool, error) {
+	b, err := Bool(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Bool(b), nil
+}
+
+// Float64P parses the given string representation of a floating point number,
+// and returns a pointer to a float64 whose value is the same as the parsed number.
+func Float64P(val string) (*float64, error) {
+	f, err := Float64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float64(f), nil
+}
+
+// Float32P parses the given string representation of a floating point number,
+// and returns a pointer to a float32 whose value is the same as the parsed number.
+func Float32P(val string) (*float32, error) {
+	f, err := Float32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float32(f), nil
+}
+
+// Int64P parses the given string representation of an integer
+// and returns a pointer to an int64 whose value is the same as the parsed integer.
+func Int64P(val string) (*int64, error) {
+	i, err := Int64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int64(i), nil
+}
+
+// Int32P parses the given string representation of an integer
+// and returns a pointer to an int32 whose value is the same as the parsed integer.
+func Int32P(val string) (*int32, error) {
+	i, err := Int32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int32(i), err
+}
+
+// Uint64P parses the given string representation of an integer
+// and returns a pointer to a uint64 whose value is the same as the parsed integer.
+func Uint64P(val string) (*uint64, error) {
+	i, err := Uint64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint64(i), err
+}
+
+// Uint32P parses the given string representation of an integer
+// and returns a pointer to a uint32 whose value is the same as the parsed integer.
+func Uint32P(val string) (*uint32, error) {
+	i, err := Uint32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint32(i), err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
new file mode 100644
index 00000000000..8549dfb97af
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
@@ -0,0 +1,378 @@
+package runtime
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/known/durationpb"
+	field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
+	"google.golang.org/protobuf/types/known/structpb"
+	"google.golang.org/protobuf/types/known/timestamppb"
+	"google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
+
+var currentQueryParser QueryParameterParser = &DefaultQueryParser{}
+
+// QueryParameterParser defines the interface for all query parameter parsers.
+type QueryParameterParser interface {
+	Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
+}
+
+// PopulateQueryParameters parses query parameters
+// into "msg" using the current query parser.
+func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+	return currentQueryParser.Parse(msg, values, filter)
+}
+
+// DefaultQueryParser is a QueryParameterParser which implements the default
+// query parameters parsing behavior.
+//
+// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context.
+type DefaultQueryParser struct{}
+
+// Parse populates "values" into "msg".
+// A value is ignored if its key starts with one of the elements in "filter".
+func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+	for key, values := range values {
+		if match := valuesKeyRegexp.FindStringSubmatch(key); len(match) == 3 {
+			key = match[1]
+			values = append([]string{match[2]}, values...)
+ } + + msgValue := msg.ProtoReflect() + fieldPath := normalizeFieldPath(msgValue, strings.Split(key, ".")) + if filter.HasCommonPrefix(fieldPath) { + continue + } + if err := populateFieldValueFromPath(msgValue, fieldPath, values); err != nil { + return err + } + } + return nil +} + +// PopulateFieldFromPath sets a value in a nested Protobuf structure. +func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { + fieldPath := strings.Split(fieldPathString, ".") + return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value}) +} + +func normalizeFieldPath(msgValue protoreflect.Message, fieldPath []string) []string { + newFieldPath := make([]string, 0, len(fieldPath)) + for i, fieldName := range fieldPath { + fields := msgValue.Descriptor().Fields() + fieldDesc := fields.ByTextName(fieldName) + if fieldDesc == nil { + fieldDesc = fields.ByJSONName(fieldName) + } + if fieldDesc == nil { + // return initial field path values if no matching message field was found + return fieldPath + } + + newFieldPath = append(newFieldPath, string(fieldDesc.Name())) + + // If this is the last element, we're done + if i == len(fieldPath)-1 { + break + } + + // Only singular message fields are allowed + if fieldDesc.Message() == nil || fieldDesc.Cardinality() == protoreflect.Repeated { + return fieldPath + } + + // Get the nested message + msgValue = msgValue.Get(fieldDesc).Message() + } + + return newFieldPath +} + +func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error { + if len(fieldPath) < 1 { + return errors.New("no field path") + } + if len(values) < 1 { + return errors.New("no value provided") + } + + var fieldDescriptor protoreflect.FieldDescriptor + for i, fieldName := range fieldPath { + fields := msgValue.Descriptor().Fields() + + // Get field by name + fieldDescriptor = fields.ByName(protoreflect.Name(fieldName)) + if fieldDescriptor == nil { + fieldDescriptor = fields.ByJSONName(fieldName) + if fieldDescriptor == nil { + // We're not returning an error here because this could just be + // an extra query parameter that isn't part of the request. 
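+				// (Editor's note: e.g. an unrelated "?debug=true" on a message
+				// without a "debug" field is logged and skipped, not rejected.)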
+ grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, ".")) + return nil + } + } + + // Check if oneof already set + if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() { + if f := msgValue.WhichOneof(of); f != nil { + if fieldDescriptor.Message() == nil || fieldDescriptor.FullName() != f.FullName() { + return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) + } + } + } + + // If this is the last element, we're done + if i == len(fieldPath)-1 { + break + } + + // Only singular message fields are allowed + if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated { + return fmt.Errorf("invalid path: %q is not a message", fieldName) + } + + // Get the nested message + msgValue = msgValue.Mutable(fieldDescriptor).Message() + } + + switch { + case fieldDescriptor.IsList(): + return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values) + case fieldDescriptor.IsMap(): + return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values) + } + + if len(values) > 1 { + return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", ")) + } + + return populateField(fieldDescriptor, msgValue, values[0]) +} + +func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error { + v, err := parseField(fieldDescriptor, value) + if err != nil { + return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err) + } + + msgValue.Set(fieldDescriptor, v) + return nil +} + +func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error { + for _, value := range values { + v, err := parseField(fieldDescriptor, value) + if err != nil { + return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err) + } + list.Append(v) + } + + return nil +} + +func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error { + if len(values) != 2 { + return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName()) + } + + key, err := parseField(fieldDescriptor.MapKey(), values[0]) + if err != nil { + return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err) + } + + value, err := parseField(fieldDescriptor.MapValue(), values[1]) + if err != nil { + return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err) + } + + mp.Set(key.MapKey(), value) + + return nil +} + +func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) { + switch fieldDescriptor.Kind() { + case protoreflect.BoolKind: + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfBool(v), nil + case protoreflect.EnumKind: + enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName()) + if err != nil { + if errors.Is(err, protoregistry.NotFound) { + return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName()) + } + return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err) + } + // Look for enum by name + v := enum.Descriptor().Values().ByName(protoreflect.Name(value)) + if v == nil { + i, err := strconv.Atoi(value) + if err != nil { + return protoreflect.Value{}, 
fmt.Errorf("%q is not a valid value", value) + } + // Look for enum by number + if v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)); v == nil { + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + } + return protoreflect.ValueOfEnum(v.Number()), nil + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfInt32(int32(v)), nil + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfInt64(v), nil + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + v, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfUint32(uint32(v)), nil + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfUint64(v), nil + case protoreflect.FloatKind: + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfFloat32(float32(v)), nil + case protoreflect.DoubleKind: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfFloat64(v), nil + case protoreflect.StringKind: + return protoreflect.ValueOfString(value), nil + case protoreflect.BytesKind: + v, err := Bytes(value) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfBytes(v), nil + case protoreflect.MessageKind, protoreflect.GroupKind: + return parseMessage(fieldDescriptor.Message(), value) + default: + panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind())) + } +} + +func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) { + var msg proto.Message + switch msgDescriptor.FullName() { + case "google.protobuf.Timestamp": + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return protoreflect.Value{}, err + } + timestamp := timestamppb.New(t) + if ok := timestamp.IsValid(); !ok { + return protoreflect.Value{}, fmt.Errorf("%s before 0001-01-01", value) + } + msg = timestamp + case "google.protobuf.Duration": + d, err := time.ParseDuration(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = durationpb.New(d) + case "google.protobuf.DoubleValue": + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Double(v) + case "google.protobuf.FloatValue": + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Float(float32(v)) + case "google.protobuf.Int64Value": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Int64(v) + case "google.protobuf.Int32Value": + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Int32(int32(v)) + case "google.protobuf.UInt64Value": + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.UInt64(v) + case "google.protobuf.UInt32Value": + v, err := strconv.ParseUint(value, 10, 32) + if err != nil 
{ + return protoreflect.Value{}, err + } + msg = wrapperspb.UInt32(uint32(v)) + case "google.protobuf.BoolValue": + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Bool(v) + case "google.protobuf.StringValue": + msg = wrapperspb.String(value) + case "google.protobuf.BytesValue": + v, err := Bytes(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Bytes(v) + case "google.protobuf.FieldMask": + fm := &field_mask.FieldMask{} + fm.Paths = append(fm.Paths, strings.Split(value, ",")...) + msg = fm + case "google.protobuf.Value": + var v structpb.Value + if err := protojson.Unmarshal([]byte(value), &v); err != nil { + return protoreflect.Value{}, err + } + msg = &v + case "google.protobuf.Struct": + var v structpb.Struct + if err := protojson.Unmarshal([]byte(value), &v); err != nil { + return protoreflect.Value{}, err + } + msg = &v + default: + return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName())) + } + + return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel new file mode 100644 index 00000000000..b8940946577 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "utilities", + srcs = [ + "doc.go", + "pattern.go", + "readerfactory.go", + "string_array_flag.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities", +) + +go_test( + name = "utilities_test", + size = "small", + srcs = [ + "string_array_flag_test.go", + "trie_test.go", + ], + deps = [":utilities"], +) + +alias( + name = "go_default_library", + actual = ":utilities", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go new file mode 100644 index 00000000000..cf79a4d5886 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go @@ -0,0 +1,2 @@ +// Package utilities provides members for internal use in grpc-gateway. +package utilities diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go new file mode 100644 index 00000000000..38ca39cc538 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -0,0 +1,22 @@ +package utilities + +// OpCode is an opcode of compiled path patterns. +type OpCode int + +// These constants are the valid values of OpCode. +const ( + // OpNop does nothing + OpNop = OpCode(iota) + // OpPush pushes a component to the stack + OpPush + // OpLitPush pushes a component to the stack if it matches the literal + OpLitPush + // OpPushM concatenates the remaining components and pushes the result to the stack + OpPushM + // OpConcatN pops N items from the stack, concatenates them and pushes the result back to the stack + OpConcatN + // OpCapture pops an item and binds it to the variable + OpCapture + // OpEnd is the least positive invalid opcode.
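+ //
+ // Editorial note (not part of the vendored upstream sources): the opcodes
+ // above describe a small stack machine that grpc-gateway runs over URL path
+ // components. A hedged sketch of a plausible compiled program for a template
+ // such as "/v1/{name=shelves/*}" (the actual program is produced elsewhere,
+ // by the httprule compiler, and may differ in detail):
+ //
+ //	OpLitPush("v1")      // path component must equal the literal "v1"
+ //	OpLitPush("shelves") // next component must equal "shelves"
+ //	OpPush               // accept any single component
+ //	OpConcatN(2)         // pop two items, push "shelves/<component>"
+ //	OpCapture("name")    // pop the result and bind it to variable "name"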
+ OpEnd +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go new file mode 100644 index 00000000000..01d26edae3c --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go @@ -0,0 +1,19 @@ +package utilities + +import ( + "bytes" + "io" +) + +// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins +// at the start of the stream +func IOReaderFactory(r io.Reader) (func() io.Reader, error) { + b, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + return func() io.Reader { + return bytes.NewReader(b) + }, nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go new file mode 100644 index 00000000000..66aa5f2dcc5 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go @@ -0,0 +1,33 @@ +package utilities + +import ( + "flag" + "strings" +) + +// flagInterface is a cut-down interface to `flag` +type flagInterface interface { + Var(value flag.Value, name string, usage string) +} + +// StringArrayFlag defines a flag with the specified name and usage string. +// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag. +func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags { + value := &StringArrayFlags{} + f.Var(value, name, usage) + return value +} + +// StringArrayFlags is a wrapper of `[]string` to provide an interface for `flag.Var` +type StringArrayFlags []string + +// String returns a string representation of `StringArrayFlags` +func (i *StringArrayFlags) String() string { + return strings.Join(*i, ",") +} + +// Set appends a value to `StringArrayFlags` +func (i *StringArrayFlags) Set(value string) error { + *i = append(*i, value) + return nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go new file mode 100644 index 00000000000..dd99b0ed256 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go @@ -0,0 +1,174 @@ +package utilities + +import ( + "sort" +) + +// DoubleArray is a Double Array implementation of a trie on sequences of strings. +type DoubleArray struct { + // Encoding keeps an encoding from string to int + Encoding map[string]int + // Base is the base array of Double Array + Base []int + // Check is the check array of Double Array + Check []int +} + +// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
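+//
+// Editorial note (not part of the vendored upstream sources): a minimal,
+// hedged sketch of wiring up StringArrayFlag from string_array_flag.go above;
+// the flag set name and the "header" flag are illustrative assumptions only:
+//
+//	fs := flag.NewFlagSet("example", flag.ContinueOnError)
+//	headers := utilities.StringArrayFlag(fs, "header", "repeatable header flag")
+//	_ = fs.Parse([]string{"--header", "a", "--header", "b"})
+//	fmt.Println(headers.String()) // prints "a,b"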
+func NewDoubleArray(seqs [][]string) *DoubleArray { + da := &DoubleArray{Encoding: make(map[string]int)} + if len(seqs) == 0 { + return da + } + + encoded := registerTokens(da, seqs) + sort.Sort(byLex(encoded)) + + root := node{row: -1, col: -1, left: 0, right: len(encoded)} + addSeqs(da, encoded, 0, root) + + for i := len(da.Base); i > 0; i-- { + if da.Check[i-1] != 0 { + da.Base = da.Base[:i] + da.Check = da.Check[:i] + break + } + } + return da +} + +func registerTokens(da *DoubleArray, seqs [][]string) [][]int { + var result [][]int + for _, seq := range seqs { + encoded := make([]int, 0, len(seq)) + for _, token := range seq { + if _, ok := da.Encoding[token]; !ok { + da.Encoding[token] = len(da.Encoding) + } + encoded = append(encoded, da.Encoding[token]) + } + result = append(result, encoded) + } + for i := range result { + result[i] = append(result[i], len(da.Encoding)) + } + return result +} + +type node struct { + row, col int + left, right int +} + +func (n node) value(seqs [][]int) int { + return seqs[n.row][n.col] +} + +func (n node) children(seqs [][]int) []*node { + var result []*node + lastVal := int(-1) + last := new(node) + for i := n.left; i < n.right; i++ { + if lastVal == seqs[i][n.col+1] { + continue + } + last.right = i + last = &node{ + row: i, + col: n.col + 1, + left: i, + } + result = append(result, last) + } + last.right = n.right + return result +} + +func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) { + ensureSize(da, pos) + + children := n.children(seqs) + var i int + for i = 1; ; i++ { + ok := func() bool { + for _, child := range children { + code := child.value(seqs) + j := i + code + ensureSize(da, j) + if da.Check[j] != 0 { + return false + } + } + return true + }() + if ok { + break + } + } + da.Base[pos] = i + for _, child := range children { + code := child.value(seqs) + j := i + code + da.Check[j] = pos + 1 + } + terminator := len(da.Encoding) + for _, child := range children { + code := child.value(seqs) + if code == terminator { + continue + } + j := i + code + addSeqs(da, seqs, j, *child) + } +} + +func ensureSize(da *DoubleArray, i int) { + for i >= len(da.Base) { + da.Base = append(da.Base, make([]int, len(da.Base)+1)...) + da.Check = append(da.Check, make([]int, len(da.Check)+1)...) + } +} + +type byLex [][]int + +func (l byLex) Len() int { return len(l) } +func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l byLex) Less(i, j int) bool { + si := l[i] + sj := l[j] + var k int + for k = 0; k < len(si) && k < len(sj); k++ { + if si[k] < sj[k] { + return true + } + if si[k] > sj[k] { + return false + } + } + return k < len(sj) +} + +// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. 
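+//
+// Editorial note (not part of the vendored upstream sources): a hedged usage
+// sketch; the registered sequences are arbitrary examples:
+//
+//	da := utilities.NewDoubleArray([][]string{{"v1", "users"}, {"v1", "books"}})
+//	da.HasCommonPrefix([]string{"v1", "users", "42"}) // true: {"v1", "users"} is a registered prefix
+//	da.HasCommonPrefix([]string{"v1"})                // false: no registered sequence is a prefix of it
+//	da.HasCommonPrefix([]string{"v2"})                // false: "v2" is not in the encoding at all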
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool { + if len(da.Base) == 0 { + return false + } + + var i int + for _, t := range seq { + code, ok := da.Encoding[t] + if !ok { + break + } + j := da.Base[i] + code + if len(da.Check) <= j || da.Check[j] != i+1 { + break + } + i = j + } + j := da.Base[i] + len(da.Encoding) + if len(da.Check) <= j || da.Check[j] != i+1 { + return false + } + return true +} diff --git a/vendor/github.com/in-toto/attestation/LICENSE b/vendor/github.com/in-toto/attestation/LICENSE new file mode 100644 index 00000000000..702a3365c06 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/LICENSE @@ -0,0 +1,13 @@ +Copyright 2021 in-toto Developers + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go new file mode 100644 index 00000000000..ae912f0d1e2 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go @@ -0,0 +1,128 @@ +/* +Wrapper APIs for in-toto attestation ResourceDescriptor protos. +*/ + +package v1 + +import ( + "encoding/hex" + "errors" + "fmt" +) + +var ( + ErrIncorrectDigestLength = errors.New("digest has incorrect length") + ErrInvalidDigestEncoding = errors.New("digest is not valid hex-encoded string") + ErrRDRequiredField = errors.New("at least one of name, URI, or digest are required") +) + +type HashAlgorithm string + +const ( + AlgorithmMD5 HashAlgorithm = "md5" + AlgorithmSHA1 HashAlgorithm = "sha1" + AlgorithmSHA224 HashAlgorithm = "sha224" + AlgorithmSHA512_224 HashAlgorithm = "sha512_224" + AlgorithmSHA256 HashAlgorithm = "sha256" + AlgorithmSHA512_256 HashAlgorithm = "sha512_256" + AlgorithmSHA384 HashAlgorithm = "sha384" + AlgorithmSHA512 HashAlgorithm = "sha512" + AlgorithmSHA3_224 HashAlgorithm = "sha3_224" + AlgorithmSHA3_256 HashAlgorithm = "sha3_256" + AlgorithmSHA3_384 HashAlgorithm = "sha3_384" + AlgorithmSHA3_512 HashAlgorithm = "sha3_512" + AlgorithmGitBlob HashAlgorithm = "gitBlob" + AlgorithmGitCommit HashAlgorithm = "gitCommit" + AlgorithmGitTag HashAlgorithm = "gitTag" + AlgorithmGitTree HashAlgorithm = "gitTree" + AlgorithmDirHash HashAlgorithm = "dirHash" +) + +// HashAlgorithms indexes the known algorithms in a dictionary +// by their string value +var HashAlgorithms = map[string]HashAlgorithm{ + "md5": AlgorithmMD5, + "sha1": AlgorithmSHA1, + "sha224": AlgorithmSHA224, + "sha512_224": AlgorithmSHA512_224, + "sha256": AlgorithmSHA256, + "sha512_256": AlgorithmSHA512_256, + "sha384": AlgorithmSHA384, + "sha512": AlgorithmSHA512, + "sha3_224": AlgorithmSHA3_224, + "sha3_256": AlgorithmSHA3_256, + "sha3_384": AlgorithmSHA3_384, + "sha3_512": AlgorithmSHA3_512, + "gitBlob": AlgorithmGitBlob, + "gitCommit": AlgorithmGitCommit, + "gitTag": AlgorithmGitTag, + "gitTree": AlgorithmGitTree, + "dirHash": AlgorithmDirHash, +} + +// HexLength returns the expected digest length in bytes for the algorithm; despite the name, the value is a byte count (the hex-encoded digest is twice this long) +func (algo HashAlgorithm) HexLength() int { + switch algo { + case
AlgorithmMD5: + return 16 + case AlgorithmSHA1, AlgorithmGitBlob, AlgorithmGitCommit, AlgorithmGitTag, AlgorithmGitTree: + return 20 + case AlgorithmSHA224, AlgorithmSHA512_224, AlgorithmSHA3_224: + return 28 + case AlgorithmSHA256, AlgorithmSHA512_256, AlgorithmSHA3_256, AlgorithmDirHash: + return 32 + case AlgorithmSHA384, AlgorithmSHA3_384: + return 48 + case AlgorithmSHA512, AlgorithmSHA3_512: + return 64 + default: + return 0 + } +} + +// String returns the hash algorithm name as a string +func (algo HashAlgorithm) String() string { + return string(algo) +} + +// isSupportedFixedSizeAlgorithm indicates whether a given fixed-size hash algorithm is supported by default and returns the algorithm's +// digest size in bytes, if supported. We assume gitCommit and dirHash are aliases for sha1 and sha256, respectively. +// +// SHA digest sizes from https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf +// MD5 digest size from https://www.rfc-editor.org/rfc/rfc1321.html#section-1 +func isSupportedFixedSizeAlgorithm(algString string) (bool, int) { + algo := HashAlgorithm(algString) + return algo.HexLength() > 0, algo.HexLength() +} + +func (d *ResourceDescriptor) Validate() error { + // at least one of name, URI or digest are required + if d.GetName() == "" && d.GetUri() == "" && len(d.GetDigest()) == 0 { + return ErrRDRequiredField + } + + if len(d.GetDigest()) > 0 { + for alg, digest := range d.GetDigest() { + + // Per https://github.com/in-toto/attestation/blob/main/spec/v1/digest_set.md + // check encoding and length for supported algorithms; + // use of custom, unsupported algorithms is allowed and does not generate validation errors. + supported, size := isSupportedFixedSizeAlgorithm(alg) + if supported { + // the in-toto spec expects a hex-encoded string in DigestSets for supported algorithms + hashBytes, err := hex.DecodeString(digest) + + if err != nil { + return fmt.Errorf("%w (%s: %s)", ErrInvalidDigestEncoding, alg, digest) + } + + // check the length of the digest + if len(hashBytes) != size { + return fmt.Errorf("%w: got %d bytes, want %d bytes (%s: %s)", ErrIncorrectDigestLength, len(hashBytes), size, alg, digest) + } + } + } + } + + return nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go new file mode 100644 index 00000000000..0dd94ea2634 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go @@ -0,0 +1,195 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.6 +// protoc v4.24.4 +// source: in_toto_attestation/v1/resource_descriptor.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of the in-toto v1 ResourceDescriptor. +// https://github.com/in-toto/attestation/blob/main/spec/v1/resource_descriptor.md +// Validation of all fields is left to the users of this proto.
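+//
+// Editorial note (not part of the vendored upstream sources): a hedged sketch
+// of what the Validate wrapper in resource_descriptor.go above accepts and
+// rejects; the digest values are illustrative:
+//
+//	rd := &v1.ResourceDescriptor{
+//		Name:   "app.tar.gz",
+//		Digest: map[string]string{"sha256": strings.Repeat("ab", 32)},
+//	}
+//	_ = rd.Validate() // nil: 64 hex chars decode to the expected 32 bytes
+//
+//	rd.Digest = map[string]string{"sha256": "abcd"}
+//	_ = rd.Validate() // ErrIncorrectDigestLength: 2 bytes instead of 32
+//
+//	rd.Digest = map[string]string{"myAlgo": "opaque-value"}
+//	_ = rd.Validate() // nil: custom algorithms are not hex- or length-checked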
+type ResourceDescriptor struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"` + Digest map[string]string `protobuf:"bytes,3,rep,name=digest,proto3" json:"digest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Content []byte `protobuf:"bytes,4,opt,name=content,proto3" json:"content,omitempty"` + DownloadLocation string `protobuf:"bytes,5,opt,name=download_location,json=downloadLocation,proto3" json:"download_location,omitempty"` + MediaType string `protobuf:"bytes,6,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // Per the Struct protobuf spec, this type corresponds to + // a JSON Object, which is truly a map under the hood. + // So, the Struct a) is still consistent with our specification for + // the `annotations` field, and b) has native support in some language + // bindings making their use easier in implementations. + // See: https://pkg.go.dev/google.golang.org/protobuf/types/known/structpb#Struct + Annotations *structpb.Struct `protobuf:"bytes,7,opt,name=annotations,proto3" json:"annotations,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceDescriptor) Reset() { + *x = ResourceDescriptor{} + mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceDescriptor) ProtoMessage() {} + +func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead. 
+func (*ResourceDescriptor) Descriptor() ([]byte, []int) { + return file_in_toto_attestation_v1_resource_descriptor_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceDescriptor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ResourceDescriptor) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (x *ResourceDescriptor) GetDigest() map[string]string { + if x != nil { + return x.Digest + } + return nil +} + +func (x *ResourceDescriptor) GetContent() []byte { + if x != nil { + return x.Content + } + return nil +} + +func (x *ResourceDescriptor) GetDownloadLocation() string { + if x != nil { + return x.DownloadLocation + } + return "" +} + +func (x *ResourceDescriptor) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *ResourceDescriptor) GetAnnotations() *structpb.Struct { + if x != nil { + return x.Annotations + } + return nil +} + +var File_in_toto_attestation_v1_resource_descriptor_proto protoreflect.FileDescriptor + +const file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc = "" + + "\n" + + "0in_toto_attestation/v1/resource_descriptor.proto\x12\x16in_toto_attestation.v1\x1a\x1cgoogle/protobuf/struct.proto\"\xe6\x02\n" + + "\x12ResourceDescriptor\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x10\n" + + "\x03uri\x18\x02 \x01(\tR\x03uri\x12N\n" + + "\x06digest\x18\x03 \x03(\v26.in_toto_attestation.v1.ResourceDescriptor.DigestEntryR\x06digest\x12\x18\n" + + "\acontent\x18\x04 \x01(\fR\acontent\x12+\n" + + "\x11download_location\x18\x05 \x01(\tR\x10downloadLocation\x12\x1d\n" + + "\n" + + "media_type\x18\x06 \x01(\tR\tmediaType\x129\n" + + "\vannotations\x18\a \x01(\v2\x17.google.protobuf.StructR\vannotations\x1a9\n" + + "\vDigestEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01BG\n" + + "\x1fio.github.intoto.attestation.v1Z$github.com/in-toto/attestation/go/v1b\x06proto3" + +var ( + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescOnce sync.Once + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData []byte +) + +func file_in_toto_attestation_v1_resource_descriptor_proto_rawDescGZIP() []byte { + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescOnce.Do(func() { + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc), len(file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc))) + }) + return file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData +} + +var file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = []any{ + (*ResourceDescriptor)(nil), // 0: in_toto_attestation.v1.ResourceDescriptor + nil, // 1: in_toto_attestation.v1.ResourceDescriptor.DigestEntry + (*structpb.Struct)(nil), // 2: google.protobuf.Struct +} +var file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs = []int32{ + 1, // 0: in_toto_attestation.v1.ResourceDescriptor.digest:type_name -> in_toto_attestation.v1.ResourceDescriptor.DigestEntry + 2, // 1: in_toto_attestation.v1.ResourceDescriptor.annotations:type_name -> google.protobuf.Struct + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension 
extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_in_toto_attestation_v1_resource_descriptor_proto_init() } +func file_in_toto_attestation_v1_resource_descriptor_proto_init() { + if File_in_toto_attestation_v1_resource_descriptor_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc), len(file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_in_toto_attestation_v1_resource_descriptor_proto_goTypes, + DependencyIndexes: file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs, + MessageInfos: file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes, + }.Build() + File_in_toto_attestation_v1_resource_descriptor_proto = out.File + file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = nil + file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs = nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.go b/vendor/github.com/in-toto/attestation/go/v1/statement.go new file mode 100644 index 00000000000..f63d5f0d747 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.go @@ -0,0 +1,50 @@ +/* +Wrapper APIs for in-toto attestation Statement layer protos. +*/ + +package v1 + +import "errors" + +const StatementTypeUri = "https://in-toto.io/Statement/v1" + +var ( + ErrInvalidStatementType = errors.New("wrong statement type") + ErrSubjectRequired = errors.New("at least one subject required") + ErrDigestRequired = errors.New("at least one digest required") + ErrPredicateTypeRequired = errors.New("predicate type required") + ErrPredicateRequired = errors.New("predicate object required") +) + +func (s *Statement) Validate() error { + if s.GetType() != StatementTypeUri { + return ErrInvalidStatementType + } + + if s.GetSubject() == nil || len(s.GetSubject()) == 0 { + return ErrSubjectRequired + } + + // check all resource descriptors in the subject + subject := s.GetSubject() + for _, rd := range subject { + if err := rd.Validate(); err != nil { + return err + } + + // v1 statements require the digest to be set in the subject + if len(rd.GetDigest()) == 0 { + return ErrDigestRequired + } + } + + if s.GetPredicateType() == "" { + return ErrPredicateTypeRequired + } + + if s.GetPredicate() == nil { + return ErrPredicateRequired + } + + return nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go new file mode 100644 index 00000000000..bc76eaf2617 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go @@ -0,0 +1,160 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.6 +// protoc v4.24.4 +// source: in_toto_attestation/v1/statement.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of the in-toto v1 Statement. +// https://github.com/in-toto/attestation/tree/main/spec/v1 +// Validation of all fields is left to the users of this proto. +type Statement struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Expected to always be "https://in-toto.io/Statement/v1" + Type string `protobuf:"bytes,1,opt,name=type,json=_type,proto3" json:"type,omitempty"` + Subject []*ResourceDescriptor `protobuf:"bytes,2,rep,name=subject,proto3" json:"subject,omitempty"` + PredicateType string `protobuf:"bytes,3,opt,name=predicate_type,json=predicateType,proto3" json:"predicate_type,omitempty"` + Predicate *structpb.Struct `protobuf:"bytes,4,opt,name=predicate,proto3" json:"predicate,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Statement) Reset() { + *x = Statement{} + mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Statement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Statement) ProtoMessage() {} + +func (x *Statement) ProtoReflect() protoreflect.Message { + mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Statement.ProtoReflect.Descriptor instead. +func (*Statement) Descriptor() ([]byte, []int) { + return file_in_toto_attestation_v1_statement_proto_rawDescGZIP(), []int{0} +} + +func (x *Statement) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Statement) GetSubject() []*ResourceDescriptor { + if x != nil { + return x.Subject + } + return nil +} + +func (x *Statement) GetPredicateType() string { + if x != nil { + return x.PredicateType + } + return "" +} + +func (x *Statement) GetPredicate() *structpb.Struct { + if x != nil { + return x.Predicate + } + return nil +} + +var File_in_toto_attestation_v1_statement_proto protoreflect.FileDescriptor + +const file_in_toto_attestation_v1_statement_proto_rawDesc = "" + + "\n" + + "&in_toto_attestation/v1/statement.proto\x12\x16in_toto_attestation.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a0in_toto_attestation/v1/resource_descriptor.proto\"\xc4\x01\n" + + "\tStatement\x12\x13\n" + + "\x04type\x18\x01 \x01(\tR\x05_type\x12D\n" + + "\asubject\x18\x02 \x03(\v2*.in_toto_attestation.v1.ResourceDescriptorR\asubject\x12%\n" + + "\x0epredicate_type\x18\x03 \x01(\tR\rpredicateType\x125\n" + + "\tpredicate\x18\x04 \x01(\v2\x17.google.protobuf.StructR\tpredicateBG\n" + + "\x1fio.github.intoto.attestation.v1Z$github.com/in-toto/attestation/go/v1b\x06proto3" + +var ( + file_in_toto_attestation_v1_statement_proto_rawDescOnce sync.Once + file_in_toto_attestation_v1_statement_proto_rawDescData []byte +) + +func file_in_toto_attestation_v1_statement_proto_rawDescGZIP() []byte { + file_in_toto_attestation_v1_statement_proto_rawDescOnce.Do(func() { + file_in_toto_attestation_v1_statement_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_in_toto_attestation_v1_statement_proto_rawDesc), len(file_in_toto_attestation_v1_statement_proto_rawDesc))) + }) + return file_in_toto_attestation_v1_statement_proto_rawDescData +} + +var file_in_toto_attestation_v1_statement_proto_msgTypes = make([]protoimpl.MessageInfo, 
1) +var file_in_toto_attestation_v1_statement_proto_goTypes = []any{ + (*Statement)(nil), // 0: in_toto_attestation.v1.Statement + (*ResourceDescriptor)(nil), // 1: in_toto_attestation.v1.ResourceDescriptor + (*structpb.Struct)(nil), // 2: google.protobuf.Struct +} +var file_in_toto_attestation_v1_statement_proto_depIdxs = []int32{ + 1, // 0: in_toto_attestation.v1.Statement.subject:type_name -> in_toto_attestation.v1.ResourceDescriptor + 2, // 1: in_toto_attestation.v1.Statement.predicate:type_name -> google.protobuf.Struct + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_in_toto_attestation_v1_statement_proto_init() } +func file_in_toto_attestation_v1_statement_proto_init() { + if File_in_toto_attestation_v1_statement_proto != nil { + return + } + file_in_toto_attestation_v1_resource_descriptor_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_in_toto_attestation_v1_statement_proto_rawDesc), len(file_in_toto_attestation_v1_statement_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_in_toto_attestation_v1_statement_proto_goTypes, + DependencyIndexes: file_in_toto_attestation_v1_statement_proto_depIdxs, + MessageInfos: file_in_toto_attestation_v1_statement_proto_msgTypes, + }.Build() + File_in_toto_attestation_v1_statement_proto = out.File + file_in_toto_attestation_v1_statement_proto_goTypes = nil + file_in_toto_attestation_v1_statement_proto_depIdxs = nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/LICENSE b/vendor/github.com/in-toto/in-toto-golang/LICENSE new file mode 100644 index 00000000000..963ee949e8e --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/LICENSE @@ -0,0 +1,13 @@ +Copyright 2018 New York University + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go new file mode 100644 index 00000000000..73aafe7e1c4 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go @@ -0,0 +1,99 @@ +package in_toto + +import ( + "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" + slsa01 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1" + slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1" +) + +const ( + // StatementInTotoV01 is the statement type for the generalized link format + // containing statements. This is constant for all predicate types. + StatementInTotoV01 = "https://in-toto.io/Statement/v0.1" + // PredicateSPDX represents a SBOM using the SPDX standard. 
+ // The SPDX standard mandates the 'spdxVersion' field, so the predicate type can omit + // the version. + PredicateSPDX = "https://spdx.dev/Document" + // PredicateCycloneDX represents a CycloneDX SBOM + PredicateCycloneDX = "https://cyclonedx.org/bom" + // PredicateLinkV1 represents an in-toto 0.9 link. + PredicateLinkV1 = "https://in-toto.io/Link/v1" +) + +// Subject describes the set of software artifacts the statement applies to. +type Subject struct { + Name string `json:"name"` + Digest common.DigestSet `json:"digest"` +} + +// StatementHeader defines the common fields for all statements +type StatementHeader struct { + Type string `json:"_type"` + PredicateType string `json:"predicateType"` + Subject []Subject `json:"subject"` +} + +/* +Statement binds the attestation to a particular subject and identifies the +type of the predicate. This struct represents a generic statement. +*/ +type Statement struct { + StatementHeader + // Predicate contains type-specific metadata. + Predicate interface{} `json:"predicate"` +} + +// ProvenanceStatementSLSA01 is the definition for an entire provenance statement with SLSA 0.1 predicate. +type ProvenanceStatementSLSA01 struct { + StatementHeader + Predicate slsa01.ProvenancePredicate `json:"predicate"` +} + +// ProvenanceStatementSLSA02 is the definition for an entire provenance statement with SLSA 0.2 predicate. +type ProvenanceStatementSLSA02 struct { + StatementHeader + Predicate slsa02.ProvenancePredicate `json:"predicate"` +} + +// ProvenanceStatementSLSA1 is the definition for an entire provenance statement with SLSA 1.0 predicate. +type ProvenanceStatementSLSA1 struct { + StatementHeader + Predicate slsa1.ProvenancePredicate `json:"predicate"` +} + +// ProvenanceStatement is the definition for an entire provenance statement with SLSA 0.2 predicate. +// Deprecated: Only version-specific provenance structs will be maintained (ProvenanceStatementSLSA01, ProvenanceStatementSLSA02). +type ProvenanceStatement struct { + StatementHeader + Predicate slsa02.ProvenancePredicate `json:"predicate"` +} + +// LinkStatement is the definition for an entire link statement. +type LinkStatement struct { + StatementHeader + Predicate Link `json:"predicate"` +} + +/* +SPDXStatement is the definition for an entire SPDX statement. +This is currently not implemented. Some tooling exists here: +https://github.com/spdx/tools-golang, but this software is still in +an early state. +This struct is the same as the generic Statement struct but is added for +completeness. +*/ +type SPDXStatement struct { + StatementHeader + Predicate interface{} `json:"predicate"` +} + +/* +CycloneDXStatement defines a CycloneDX SBOM in the predicate. Like its SPDX +counterpart, it is currently not serialized into a typed struct. It is an empty +interface, like the generic Statement. +*/ +type CycloneDXStatement struct { + StatementHeader + Predicate interface{} `json:"predicate"` +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go new file mode 100644 index 00000000000..9b1de12b182 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go @@ -0,0 +1,156 @@ +package in_toto + +import ( + "crypto/x509" + "fmt" + "net/url" +) + +const ( + AllowAllConstraint = "*" +) + +// CertificateConstraint defines the attributes a certificate must have to act as a functionary.
+// A wildcard `*` allows any value in the specified attribute, whereas an empty array or value +// asserts that the certificate must have nothing for that attribute. A certificate must have +// every value defined in a constraint to match. +type CertificateConstraint struct { + CommonName string `json:"common_name"` + DNSNames []string `json:"dns_names"` + Emails []string `json:"emails"` + Organizations []string `json:"organizations"` + Roots []string `json:"roots"` + URIs []string `json:"uris"` +} + +// checkResult is a data structure used to hold +// certificate constraint errors +type checkResult struct { + errors []error +} + +// newCheckResult initializes a new checkResult +func newCheckResult() *checkResult { + return &checkResult{ + errors: make([]error, 0), + } +} + +// evaluate runs a constraint check on a certificate +func (cr *checkResult) evaluate(cert *x509.Certificate, constraintCheck func(*x509.Certificate) error) *checkResult { + err := constraintCheck(cert) + if err != nil { + cr.errors = append(cr.errors, err) + } + return cr +} + +// error reduces all of the errors into one error with a +// combined error message. If there are no errors, nil +// will be returned. +func (cr *checkResult) error() error { + if len(cr.errors) == 0 { + return nil + } + return fmt.Errorf("cert failed constraints check: %+q", cr.errors) +} + +// Check tests the provided certificate against the constraint. An error is returned if the certificate +// fails any of the constraints. nil is returned if the certificate passes all of the constraints. +func (cc CertificateConstraint) Check(cert *x509.Certificate, rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) error { + return newCheckResult(). + evaluate(cert, cc.checkCommonName). + evaluate(cert, cc.checkDNSNames). + evaluate(cert, cc.checkEmails). + evaluate(cert, cc.checkOrganizations). + evaluate(cert, cc.checkRoots(rootCAIDs, rootCertPool, intermediateCertPool)). + evaluate(cert, cc.checkURIs). + error() +} + +// checkCommonName verifies that the certificate's common name matches the constraint. +func (cc CertificateConstraint) checkCommonName(cert *x509.Certificate) error { + return checkCertConstraint("common name", []string{cc.CommonName}, []string{cert.Subject.CommonName}) +} + +// checkDNSNames verifies that the certificate's DNS names match the constraint. +func (cc CertificateConstraint) checkDNSNames(cert *x509.Certificate) error { + return checkCertConstraint("dns name", cc.DNSNames, cert.DNSNames) +} + +// checkEmails verifies that the certificate's emails match the constraint. +func (cc CertificateConstraint) checkEmails(cert *x509.Certificate) error { + return checkCertConstraint("email", cc.Emails, cert.EmailAddresses) +} + +// checkOrganizations verifies that the certificate's organizations match the constraint. +func (cc CertificateConstraint) checkOrganizations(cert *x509.Certificate) error { + return checkCertConstraint("organization", cc.Organizations, cert.Subject.Organization) +} + +// checkRoots verifies that the certificate's roots match the constraint. +// The certificate's trust chain must also be verified.
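+//
+// Editorial note (not part of the vendored upstream sources): a hedged sketch
+// of a constraint; "*" opts an attribute out of checking and an empty value
+// requires the attribute to be absent. cert, rootCAIDs and the two pools are
+// assumed to exist in the caller:
+//
+//	cc := in_toto.CertificateConstraint{
+//		CommonName:    "*",                         // any common name
+//		DNSNames:      []string{""},                // must have no DNS SANs
+//		Emails:        []string{"dev@example.com"}, // exactly this email SAN
+//		Organizations: []string{"*"},               // any organizations
+//		Roots:         []string{"*"},               // any root, but the chain must verify
+//		URIs:          []string{""},                // must have no URI SANs
+//	}
+//	err := cc.Check(cert, rootCAIDs, rootPool, intermediatePool)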
+func (cc CertificateConstraint) checkRoots(rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) func(*x509.Certificate) error { + return func(cert *x509.Certificate) error { + _, err := VerifyCertificateTrust(cert, rootCertPool, intermediateCertPool) + if err != nil { + return fmt.Errorf("failed to verify roots: %w", err) + } + return checkCertConstraint("root", cc.Roots, rootCAIDs) + } +} + +// checkURIs verifies that the certificate's URIs match the constraint. +func (cc CertificateConstraint) checkURIs(cert *x509.Certificate) error { + return checkCertConstraint("uri", cc.URIs, urisToStrings(cert.URIs)) +} + +// urisToStrings is a helper that converts a list of URL objects to the strings that represent them +func urisToStrings(uris []*url.URL) []string { + res := make([]string, 0, len(uris)) + for _, uri := range uris { + res = append(res, uri.String()) + } + + return res +} + +// checkCertConstraint tests that the provided test values match the allowed values of the constraint. +// All allowed values must be met one-to-one to be considered a successful match. +func checkCertConstraint(attributeName string, constraints, values []string) error { + // If the only constraint is to allow all, the check succeeds + if len(constraints) == 1 && constraints[0] == AllowAllConstraint { + return nil + } + + if len(constraints) == 1 && constraints[0] == "" { + constraints = []string{} + } + + if len(values) == 1 && values[0] == "" { + values = []string{} + } + + // If no constraints are specified, but the certificate has values for the attribute, then the check fails + if len(constraints) == 0 && len(values) > 0 { + return fmt.Errorf("not expecting any %s(s), but cert has %d %s(s)", attributeName, len(values), attributeName) + } + + unmet := NewSet(constraints...) + for _, v := range values { + // if the cert has a value we didn't expect, fail early + if !unmet.Has(v) { + return fmt.Errorf("cert has an unexpected %s %s given constraints %+q", attributeName, v, constraints) + } + + // consider the constraint met + unmet.Remove(v) + } + + // if we have any unmet left after going through each test value, fail. + if len(unmet) > 0 { + return fmt.Errorf("cert with %s(s) %+q did not pass all constraints %+q", attributeName, values, constraints) + } + + return nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go new file mode 100644 index 00000000000..2c8afff1f75 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go @@ -0,0 +1,166 @@ +package in_toto + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/secure-systems-lab/go-securesystemslib/signerverifier" +) + +// PayloadType is the payload type used for links and layouts.
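+//
+// Editorial note (not part of the vendored upstream sources): the matching in
+// checkCertConstraint above is strictly one-to-one. Hedged examples with
+// illustrative values:
+//
+//	checkCertConstraint("email", []string{"a@x.io", "b@x.io"}, []string{"a@x.io", "b@x.io"}) // nil
+//	checkCertConstraint("email", []string{"a@x.io", "b@x.io"}, []string{"a@x.io"})           // error: constraint "b@x.io" unmet
+//	checkCertConstraint("email", []string{"a@x.io"}, []string{"a@x.io", "c@x.io"})           // error: unexpected value "c@x.io"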
+const PayloadType = "application/vnd.in-toto+json" + +// ErrInvalidPayloadType indicates that the envelope used an unknown payload type +var ErrInvalidPayloadType = errors.New("unknown payload type") + +type Envelope struct { + envelope *dsse.Envelope + payload any +} + +func loadEnvelope(env *dsse.Envelope) (*Envelope, error) { + e := &Envelope{envelope: env} + + contentBytes, err := env.DecodeB64Payload() + if err != nil { + return nil, err + } + + payload, err := loadPayload(contentBytes) + if err != nil { + return nil, err + } + e.payload = payload + + return e, nil +} + +func (e *Envelope) SetPayload(payload any) error { + encodedBytes, err := cjson.EncodeCanonical(payload) + if err != nil { + return err + } + + e.payload = payload + e.envelope = &dsse.Envelope{ + Payload: base64.StdEncoding.EncodeToString(encodedBytes), + PayloadType: PayloadType, + } + + return nil +} + +func (e *Envelope) GetPayload() any { + return e.payload +} + +func (e *Envelope) VerifySignature(key Key) error { + verifier, err := getSignerVerifierFromKey(key) + if err != nil { + return err + } + + ev, err := dsse.NewEnvelopeVerifier(verifier) + if err != nil { + return err + } + + _, err = ev.Verify(context.Background(), e.envelope) + return err +} + +func (e *Envelope) Sign(key Key) error { + signer, err := getSignerVerifierFromKey(key) + if err != nil { + return err + } + + es, err := dsse.NewEnvelopeSigner(signer) + if err != nil { + return err + } + + payload, err := e.envelope.DecodeB64Payload() + if err != nil { + return err + } + + env, err := es.SignPayload(context.Background(), e.envelope.PayloadType, payload) + if err != nil { + return err + } + + e.envelope = env + return nil +} + +func (e *Envelope) Sigs() []Signature { + sigs := []Signature{} + for _, s := range e.envelope.Signatures { + sigs = append(sigs, Signature{ + KeyID: s.KeyID, + Sig: s.Sig, + }) + } + return sigs +} + +func (e *Envelope) GetSignatureForKeyID(keyID string) (Signature, error) { + for _, s := range e.Sigs() { + if s.KeyID == keyID { + return s, nil + } + } + + return Signature{}, fmt.Errorf("no signature found for key '%s'", keyID) +} + +func (e *Envelope) Dump(path string) error { + jsonBytes, err := json.MarshalIndent(e.envelope, "", " ") + if err != nil { + return err + } + + // Write JSON bytes to the passed path with permissions (-rw-r--r--) + err = os.WriteFile(path, jsonBytes, 0644) + if err != nil { + return err + } + + return nil +} + +func getSignerVerifierFromKey(key Key) (dsse.SignerVerifier, error) { + sslibKey := getSSLibKeyFromKey(key) + + switch sslibKey.KeyType { + case signerverifier.RSAKeyType: + return signerverifier.NewRSAPSSSignerVerifierFromSSLibKey(&sslibKey) + case signerverifier.ED25519KeyType: + return signerverifier.NewED25519SignerVerifierFromSSLibKey(&sslibKey) + case signerverifier.ECDSAKeyType: + return signerverifier.NewECDSASignerVerifierFromSSLibKey(&sslibKey) + } + + return nil, ErrUnsupportedKeyType +} + +func getSSLibKeyFromKey(key Key) signerverifier.SSLibKey { + return signerverifier.SSLibKey{ + KeyType: key.KeyType, + KeyIDHashAlgorithms: key.KeyIDHashAlgorithms, + KeyID: key.KeyID, + Scheme: key.Scheme, + KeyVal: signerverifier.KeyVal{ + Public: key.KeyVal.Public, + Private: key.KeyVal.Private, + Certificate: key.KeyVal.Certificate, + }, + } +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go new file mode 100644 index 00000000000..bdfc65d69f9 --- /dev/null +++ 
b/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go @@ -0,0 +1,30 @@ +package in_toto + +import ( + "crypto/sha256" + "crypto/sha512" + "hash" +) + +/* +getHashMapping returns a mapping from hash algorithm to supported hash +interface. +*/ +func getHashMapping() map[string]func() hash.Hash { + return map[string]func() hash.Hash{ + "sha256": sha256.New, + "sha512": sha512.New, + "sha384": sha512.New384, + } +} + +/* +hashToHex calculates the hash over data based on hash algorithm h. +*/ +func hashToHex(h hash.Hash, data []byte) []byte { + h.Write(data) + // We need to use h.Sum(nil) here, because otherwise hash.Sum() appends + // the hash to the passed data. So instead of having only the hash + // we would get: "dataHASH" + return h.Sum(nil) +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go new file mode 100644 index 00000000000..52429ca44be --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go @@ -0,0 +1,668 @@ +package in_toto + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "os" + "strings" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" +) + +// ErrFailedPEMParsing gets returned when PKCS1, PKCS8 or PKIX key parsing fails +var ErrFailedPEMParsing = errors.New("failed parsing the PEM block: unsupported PEM type") + +// ErrNoPEMBlock gets triggered when there is no PEM block in the provided file +var ErrNoPEMBlock = errors.New("failed to decode the data as PEM block (are you sure this is a pem file?)") + +// ErrUnsupportedKeyType is returned when we are dealing with a key type different from RSA, ECDSA, or ed25519 +var ErrUnsupportedKeyType = errors.New("unsupported key type") + +// ErrInvalidSignature is returned when the signature is invalid +var ErrInvalidSignature = errors.New("invalid signature") + +// ErrInvalidKey is returned when a given key is none of RSA, ECDSA or ED25519 +var ErrInvalidKey = errors.New("invalid key") + +const ( + rsaKeyType string = "rsa" + ecdsaKeyType string = "ecdsa" + ed25519KeyType string = "ed25519" + rsassapsssha256Scheme string = "rsassa-pss-sha256" + ecdsaSha2nistp224 string = "ecdsa-sha2-nistp224" + ecdsaSha2nistp256 string = "ecdsa-sha2-nistp256" + ecdsaSha2nistp384 string = "ecdsa-sha2-nistp384" + ecdsaSha2nistp521 string = "ecdsa-sha2-nistp521" + ed25519Scheme string = "ed25519" + pemPublicKey string = "PUBLIC KEY" + pemPrivateKey string = "PRIVATE KEY" + pemRSAPrivateKey string = "RSA PRIVATE KEY" +) + +/* +getSupportedKeyIDHashAlgorithms returns a Set of supported +KeyIDHashAlgorithms. We need to use this function instead of a constant, +because Go does not support global constant slices. +*/ +func getSupportedKeyIDHashAlgorithms() Set { + return NewSet("sha256", "sha512") +} + +/* +getSupportedRSASchemes returns a string slice of supported RSA Key schemes. +We need to use this function instead of a constant because Go does not support +global constant slices. +*/ +func getSupportedRSASchemes() []string { + return []string{rsassapsssha256Scheme} +} + +/* +getSupportedEcdsaSchemes returns a string slice of supported ecdsa Key schemes. +We need to use this function instead of a constant because Go does not support +global constant slices.
+*/ +func getSupportedEcdsaSchemes() []string { + return []string{ecdsaSha2nistp224, ecdsaSha2nistp256, ecdsaSha2nistp384, ecdsaSha2nistp521} +} + +/* +getSupportedEd25519Schemes returns a string slice of supported ed25519 Key +schemes. We need to use this function instead of a constant because Go does +not support global constant slices. +*/ +func getSupportedEd25519Schemes() []string { + return []string{ed25519Scheme} +} + +/* +generateKeyID creates a partial key map and generates the key ID +based on the created partial key map via the SHA256 method. +The resulting keyID will be directly saved in the corresponding key object. +On success generateKeyID will return nil; if encoding fails, an error is +returned. +*/ +func (k *Key) generateKeyID() error { + // Create partial key map used to create the keyid + // Unfortunately, we can't use the Key object because this also carries + // unwanted fields, such as KeyID and KeyVal.Private, and therefore + // produces a different hash. We generate the keyID exactly as we do in + // the securesystemslib to keep interoperability with other in-toto + // implementations. + var keyToBeHashed = map[string]interface{}{ + "keytype": k.KeyType, + "scheme": k.Scheme, + "keyid_hash_algorithms": k.KeyIDHashAlgorithms, + "keyval": map[string]string{ + "public": k.KeyVal.Public, + }, + } + keyCanonical, err := cjson.EncodeCanonical(keyToBeHashed) + if err != nil { + return err + } + // calculate sha256 and return string representation of keyID + keyHashed := sha256.Sum256(keyCanonical) + k.KeyID = fmt.Sprintf("%x", keyHashed) + err = validateKey(*k) + if err != nil { + return err + } + return nil +} + +/* +generatePEMBlock creates a PEM block from scratch via the keyBytes and the pemType. +If successful it returns the PEM block as a []byte slice. This function should always +succeed; if keyBytes is empty, the PEM block will have an empty byte section, so +only the header and footer will exist. +*/ +func generatePEMBlock(keyBytes []byte, pemType string) []byte { + // construct PEM block + pemBlock := &pem.Block{ + Type: pemType, + Headers: nil, + Bytes: keyBytes, + } + return pem.EncodeToMemory(pemBlock) +} + +/* +setKeyComponents sets all components in our key object. +Furthermore, it removes any leading and trailing whitespace or newlines. +We treat key types differently for interoperability with the in-toto Python +implementation and the securesystemslib.
+*/ +func (k *Key) setKeyComponents(pubKeyBytes []byte, privateKeyBytes []byte, keyType string, scheme string, KeyIDHashAlgorithms []string) error { + // assume we have a private key if its size is bigger than 0 + + switch keyType { + case rsaKeyType: + if len(privateKeyBytes) > 0 { + k.KeyVal = KeyVal{ + Private: strings.TrimSpace(string(generatePEMBlock(privateKeyBytes, pemRSAPrivateKey))), + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))), + } + } else { + k.KeyVal = KeyVal{ + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))), + } + } + case ecdsaKeyType: + if len(privateKeyBytes) > 0 { + k.KeyVal = KeyVal{ + Private: strings.TrimSpace(string(generatePEMBlock(privateKeyBytes, pemPrivateKey))), + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))), + } + } else { + k.KeyVal = KeyVal{ + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))), + } + } + case ed25519KeyType: + if len(privateKeyBytes) > 0 { + k.KeyVal = KeyVal{ + Private: strings.TrimSpace(hex.EncodeToString(privateKeyBytes)), + Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes)), + } + } else { + k.KeyVal = KeyVal{ + Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes)), + } + } + default: + return fmt.Errorf("%w: %s", ErrUnsupportedKeyType, keyType) + } + k.KeyType = keyType + k.Scheme = scheme + k.KeyIDHashAlgorithms = KeyIDHashAlgorithms + if err := k.generateKeyID(); err != nil { + return err + } + return nil +} + +/* +parseKey tries to parse a PEM []byte slice, using the following standards +in the given order: + + - PKCS8 + - PKCS1 + - PKIX + +(as a fallback it also tries X.509 certificates and SEC1 EC private keys). +On success it returns the parsed key and nil. +On failure it returns nil and the error ErrFailedPEMParsing +*/ +func parseKey(data []byte) (interface{}, error) { + key, err := x509.ParsePKCS8PrivateKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParsePKCS1PrivateKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParsePKIXPublicKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParseCertificate(data) + if err == nil { + return key, nil + } + key, err = x509.ParseECPrivateKey(data) + if err == nil { + return key, nil + } + return nil, ErrFailedPEMParsing +} + +/* +decodeAndParse receives potential PEM bytes, decodes them via pem.Decode +and pushes them to parseKey. If any error occurs during this process, +the function will return nil and an error (either ErrFailedPEMParsing +or ErrNoPEMBlock). On success it will return the decoded pemData, the +key object interface and nil as error. We need the decoded pemData, +because LoadKey relies on decoded pemData for operating system +interoperability. +*/ +func decodeAndParse(pemBytes []byte) (*pem.Block, interface{}, error) { + // pem.Decode returns the parsed pem block and a rest. + // The rest is everything that could not be parsed as a PEM block. + // Therefore we can drop it using the blank identifier "_" + data, _ := pem.Decode(pemBytes) + if data == nil { + return nil, nil, ErrNoPEMBlock + } + + // Try to load private key, if this fails try to load + // key as public key + key, err := parseKey(data.Bytes) + if err != nil { + return nil, nil, err + } + return data, key, nil +} + +/* +LoadKey loads the key file at the specified file path into the key object. +It automatically derives the PEM type and the key type.
+Right now the following PEM types are supported: + + - PKCS1 for private keys + - PKCS8 for private keys + - PKIX for public keys + +The following key types are supported and will be automatically assigned to +the key type field: + + - ed25519 + - rsa + - ecdsa + +The following schemes are supported: + + - ed25519 -> ed25519 + - rsa -> rsassa-pss-sha256 + - ecdsa -> ecdsa-sha2-nistp256 + +Note that this behavior is consistent with the securesystemslib, except for +ecdsa: we do not use the scheme string as the key type in in-toto-golang. +Instead we go with an ecdsa/ecdsa-sha2-nistp256 pair. + +On success it will return nil. The following errors can happen: + + - path not found or not readable + - no PEM block in the loaded file + - no valid PKCS8/PKCS1 private key or PKIX public key + - errors while marshalling + - unsupported key types +*/ +func (k *Key) LoadKey(path string, scheme string, KeyIDHashAlgorithms []string) error { + pemFile, err := os.Open(path) + if err != nil { + return err + } + defer pemFile.Close() + + err = k.LoadKeyReader(pemFile, scheme, KeyIDHashAlgorithms) + if err != nil { + return err + } + + return pemFile.Close() +} + +func (k *Key) LoadKeyDefaults(path string) error { + pemFile, err := os.Open(path) + if err != nil { + return err + } + defer pemFile.Close() + + err = k.LoadKeyReaderDefaults(pemFile) + if err != nil { + return err + } + + return pemFile.Close() +} + +// LoadKeyReader loads the key from a supplied reader. The logic matches LoadKey otherwise. +func (k *Key) LoadKeyReader(r io.Reader, scheme string, KeyIDHashAlgorithms []string) error { + if r == nil { + return ErrNoPEMBlock + } + // Read key bytes + pemBytes, err := io.ReadAll(r) + if err != nil { + return err + } + // decodeAndParse returns the pemData for later use + // and a parsed key object (for operations on that key, like extracting the public key) + pemData, key, err := decodeAndParse(pemBytes) + if err != nil { + return err + } + + return k.loadKey(key, pemData, scheme, KeyIDHashAlgorithms) +} + +func (k *Key) LoadKeyReaderDefaults(r io.Reader) error { + if r == nil { + return ErrNoPEMBlock + } + // Read key bytes + pemBytes, err := io.ReadAll(r) + if err != nil { + return err + } + // decodeAndParse returns the pemData for later use + // and a parsed key object (for operations on that key, like extracting the public key) + pemData, key, err := decodeAndParse(pemBytes) + if err != nil { + return err + } + + scheme, keyIDHashAlgorithms, err := getDefaultKeyScheme(key) + if err != nil { + return err + } + + return k.loadKey(key, pemData, scheme, keyIDHashAlgorithms) +} + +func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms []string, err error) { + keyIDHashAlgorithms = []string{"sha256", "sha512"} + + switch k := key.(type) { + case *rsa.PublicKey, *rsa.PrivateKey: + scheme = rsassapsssha256Scheme + case ed25519.PrivateKey, ed25519.PublicKey: + scheme = ed25519Scheme + case *ecdsa.PrivateKey, *ecdsa.PublicKey: + scheme = ecdsaSha2nistp256 + case *x509.Certificate: + return getDefaultKeyScheme(k.PublicKey) + default: + err = ErrUnsupportedKeyType + } + + return scheme, keyIDHashAlgorithms, err +} + +func (k *Key) loadKey(keyObj interface{}, pemData *pem.Block, scheme string, keyIDHashAlgorithms []string) error { + switch key := keyObj.(type) { + case *rsa.PublicKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key) + if err != nil { + return err + } + if err := k.setKeyComponents(pubKeyBytes, []byte{}, rsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
+			return err
+		}
+	case *rsa.PrivateKey:
+		// Note: RSA Public Keys will get stored as X.509 SubjectPublicKeyInfo (RFC5280)
+		// This behavior is consistent with the securesystemslib
+		pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.Public())
+		if err != nil {
+			return err
+		}
+		if err := k.setKeyComponents(pubKeyBytes, pemData.Bytes, rsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
+			return err
+		}
+	case ed25519.PublicKey:
+		if err := k.setKeyComponents(key, []byte{}, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil {
+			return err
+		}
+	case ed25519.PrivateKey:
+		pubKeyBytes := key.Public()
+		if err := k.setKeyComponents(pubKeyBytes.(ed25519.PublicKey), key, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil {
+			return err
+		}
+	case *ecdsa.PrivateKey:
+		pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.Public())
+		if err != nil {
+			return err
+		}
+		if err := k.setKeyComponents(pubKeyBytes, pemData.Bytes, ecdsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
+			return err
+		}
+	case *ecdsa.PublicKey:
+		pubKeyBytes, err := x509.MarshalPKIXPublicKey(key)
+		if err != nil {
+			return err
+		}
+		if err := k.setKeyComponents(pubKeyBytes, []byte{}, ecdsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
+			return err
+		}
+	case *x509.Certificate:
+		err := k.loadKey(key.PublicKey, pemData, scheme, keyIDHashAlgorithms)
+		if err != nil {
+			return err
+		}
+
+		k.KeyVal.Certificate = string(pem.EncodeToMemory(pemData))
+
+	default:
+		// We should never get here, because we handle all key types supported by Go above
+		return errors.New("unexpected Error in LoadKey function")
+	}
+
+	return nil
+}
+
+/*
+GenerateSignature will automatically detect the key type and sign the signable data
+with the provided key. If everything goes right GenerateSignature will return
+a signature that is valid for the key and err=nil. If something goes wrong it will
+return an uninitialized signature and an error. Possible errors are:
+
+ - ErrNoPEMBlock
+ - ErrUnsupportedKeyType
+
+Currently only one scheme per key is supported.
+
+Note that in-toto-golang has different requirements for an ecdsa key.
+In in-toto-golang we use the string 'ecdsa' as the key type.
+In the key scheme we use: ecdsa-sha2-nistp256.
+*/
+func GenerateSignature(signable []byte, key Key) (Signature, error) {
+	err := validateKey(key)
+	if err != nil {
+		return Signature{}, err
+	}
+	var signature Signature
+	var signatureBuffer []byte
+	hashMapping := getHashMapping()
+	// The following switch block is needed for keeping interoperability
+	// with the securesystemslib and the python implementation
+	// in which we are storing RSA keys in PEM format, but ed25519 keys hex encoded.
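+	// In short: rsa keys sign a SHA-256 digest with RSASSA-PSS, ecdsa keys sign
+	// a digest chosen by curve size with ASN.1 encoded ECDSA, and ed25519 keys
+	// sign the payload bytes directly.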
+	switch key.KeyType {
+	case rsaKeyType:
+		// We do not need the pemData here, so we can throw it away via '_'
+		_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))
+		if err != nil {
+			return Signature{}, err
+		}
+		parsedKey, ok := parsedKey.(*rsa.PrivateKey)
+		if !ok {
+			return Signature{}, ErrKeyKeyTypeMismatch
+		}
+		switch key.Scheme {
+		case rsassapsssha256Scheme:
+			hashed := hashToHex(hashMapping["sha256"](), signable)
+			// We use rand.Reader as secure random source for rsa.SignPSS()
+			signatureBuffer, err = rsa.SignPSS(rand.Reader, parsedKey.(*rsa.PrivateKey), crypto.SHA256, hashed,
+				&rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256})
+			if err != nil {
+				return signature, err
+			}
+		default:
+			// supported key schemes will get checked in validateKey
+			panic("unexpected Error in GenerateSignature function")
+		}
+	case ecdsaKeyType:
+		// We do not need the pemData here, so we can throw it away via '_'
+		_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))
+		if err != nil {
+			return Signature{}, err
+		}
+		parsedKey, ok := parsedKey.(*ecdsa.PrivateKey)
+		if !ok {
+			return Signature{}, ErrKeyKeyTypeMismatch
+		}
+		curveSize := parsedKey.(*ecdsa.PrivateKey).Curve.Params().BitSize
+		var hashed []byte
+		if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil {
+			return Signature{}, ErrCurveSizeSchemeMismatch
+		}
+		// implement https://tools.ietf.org/html/rfc5656#section-6.2.1
+		// We determine the curve size and choose the correct hashing
+		// method based on the curveSize
+		switch {
+		case curveSize <= 256:
+			hashed = hashToHex(hashMapping["sha256"](), signable)
+		case 256 < curveSize && curveSize <= 384:
+			hashed = hashToHex(hashMapping["sha384"](), signable)
+		case curveSize > 384:
+			hashed = hashToHex(hashMapping["sha512"](), signable)
+		default:
+			panic("unexpected Error in GenerateSignature function")
+		}
+		// Generate the ecdsa signature in the same way as we do in the securesystemslib.
+		// We are marshalling the ecdsaSignature struct as ASN.1 INTEGER SEQUENCES
+		// into an ASN.1 Object.
+		signatureBuffer, err = ecdsa.SignASN1(rand.Reader, parsedKey.(*ecdsa.PrivateKey), hashed[:])
+		if err != nil {
+			return signature, err
+		}
+	case ed25519KeyType:
+		// We do not need a scheme switch here, because ed25519
+		// only consists of sha256 and curve25519.
+		privateHex, err := hex.DecodeString(key.KeyVal.Private)
+		if err != nil {
+			return signature, ErrInvalidHexString
+		}
+		// Note: We can directly use the key for signing and do not
+		// need to use ed25519.NewKeyFromSeed().
+		signatureBuffer = ed25519.Sign(privateHex, signable)
+	default:
+		// We should never get here, because we call validateKey in the first
+		// line of the function.
+		panic("unexpected Error in GenerateSignature function")
+	}
+	signature.Sig = hex.EncodeToString(signatureBuffer)
+	signature.KeyID = key.KeyID
+	signature.Certificate = key.KeyVal.Certificate
+	return signature, nil
+}
+
+/*
+VerifySignature will verify unverified byte data via a passed key and signature.
+Supported key types are:
+
+ - rsa
+ - ed25519
+ - ecdsa
+
+When encountering an RSA key, VerifySignature will decode the PEM block in the key
+and will call rsa.VerifyPSS() for verifying the RSA signature.
+When encountering an ed25519 key, VerifySignature will decode the hex string encoded
+public key and will use ed25519.Verify() for verifying the ed25519 signature.
+When the given key is an ecdsa key, VerifySignature will unmarshal the ASN.1 object
+and will use the retrieved ecdsa components 'r' and 's' for verifying the signature.
+On success it will return nil. In case of an unsupported key type or any other error
+it will return an error.
+
+Note that in-toto-golang has different requirements for an ecdsa key.
+In in-toto-golang we use the string 'ecdsa' as the key type.
+In the key scheme we use: ecdsa-sha2-nistp256.
+*/
+func VerifySignature(key Key, sig Signature, unverified []byte) error {
+	err := validateKey(key)
+	if err != nil {
+		return err
+	}
+	sigBytes, err := hex.DecodeString(sig.Sig)
+	if err != nil {
+		return err
+	}
+	hashMapping := getHashMapping()
+	switch key.KeyType {
+	case rsaKeyType:
+		// We do not need the pemData here, so we can throw it away via '_'
+		_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))
+		if err != nil {
+			return err
+		}
+		parsedKey, ok := parsedKey.(*rsa.PublicKey)
+		if !ok {
+			return ErrKeyKeyTypeMismatch
+		}
+		switch key.Scheme {
+		case rsassapsssha256Scheme:
+			hashed := hashToHex(hashMapping["sha256"](), unverified)
+			err = rsa.VerifyPSS(parsedKey.(*rsa.PublicKey), crypto.SHA256, hashed, sigBytes, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256})
+			if err != nil {
+				return fmt.Errorf("%w: %s", ErrInvalidSignature, err)
+			}
+		default:
+			// supported key schemes will get checked in validateKey
+			panic("unexpected Error in VerifySignature function")
+		}
+	case ecdsaKeyType:
+		// We do not need the pemData here, so we can throw it away via '_'
+		_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))
+		if err != nil {
+			return err
+		}
+		parsedKey, ok := parsedKey.(*ecdsa.PublicKey)
+		if !ok {
+			return ErrKeyKeyTypeMismatch
+		}
+		curveSize := parsedKey.(*ecdsa.PublicKey).Curve.Params().BitSize
+		var hashed []byte
+		if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil {
+			return ErrCurveSizeSchemeMismatch
+		}
+		// implement https://tools.ietf.org/html/rfc5656#section-6.2.1
+		// We determine the curve size and choose the correct hashing
+		// method based on the curveSize
+		switch {
+		case curveSize <= 256:
+			hashed = hashToHex(hashMapping["sha256"](), unverified)
+		case 256 < curveSize && curveSize <= 384:
+			hashed = hashToHex(hashMapping["sha384"](), unverified)
+		case curveSize > 384:
+			hashed = hashToHex(hashMapping["sha512"](), unverified)
+		default:
+			panic("unexpected Error in VerifySignature function")
+		}
+		if ok := ecdsa.VerifyASN1(parsedKey.(*ecdsa.PublicKey), hashed[:], sigBytes); !ok {
+			return ErrInvalidSignature
+		}
+	case ed25519KeyType:
+		// We do not need a scheme switch here, because ed25519
+		// only consists of sha256 and curve25519.
+		pubHex, err := hex.DecodeString(key.KeyVal.Public)
+		if err != nil {
+			return ErrInvalidHexString
+		}
+		if ok := ed25519.Verify(pubHex, unverified, sigBytes); !ok {
+			return fmt.Errorf("%w: ed25519", ErrInvalidSignature)
+		}
+	default:
+		// We should never get here, because we call validateKey in the first
+		// line of the function.
+ panic("unexpected Error in VerifySignature function") + } + return nil +} + +/* +VerifyCertificateTrust verifies that the certificate has a chain of trust +to a root in rootCertPool, possibly using any intermediates in +intermediateCertPool +*/ +func VerifyCertificateTrust(cert *x509.Certificate, rootCertPool, intermediateCertPool *x509.CertPool) ([][]*x509.Certificate, error) { + verifyOptions := x509.VerifyOptions{ + Roots: rootCertPool, + Intermediates: intermediateCertPool, + } + chains, err := cert.Verify(verifyOptions) + if len(chains) == 0 || err != nil { + return nil, fmt.Errorf("cert cannot be verified by provided roots and intermediates") + } + return chains, nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/match.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/match.go new file mode 100644 index 00000000000..52373aa75f5 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/match.go @@ -0,0 +1,227 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found at https://golang.org/LICENSE. + +// this is a modified version of path.Match that removes handling of path separators + +package in_toto + +import ( + "errors" + "unicode/utf8" +) + +// errBadPattern indicates a pattern was malformed. +var errBadPattern = errors.New("syntax error in pattern") + +// match reports whether name matches the shell pattern. +// The pattern syntax is: +// +// pattern: +// { term } +// term: +// '*' matches any sequence of non-/ characters +// '?' matches any single non-/ character +// '[' [ '^' ] { character-range } ']' +// character class (must be non-empty) +// c matches character c (c != '*', '?', '\\', '[') +// '\\' c matches character c +// +// character-range: +// c matches character c (c != '\\', '-', ']') +// '\\' c matches character c +// lo '-' hi matches character c for lo <= c <= hi +// +// Match requires pattern to match all of name, not just a substring. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +func match(pattern, name string) (matched bool, err error) { +Pattern: + for len(pattern) > 0 { + var star bool + var chunk string + star, chunk, pattern = scanChunk(pattern) + if star && chunk == "" { + // Trailing * matches everything + return true, nil + } + // Look for match at current position. + t, ok, err := matchChunk(chunk, name) + // if we're the last chunk, make sure we've exhausted the name + // otherwise we'll give a false result even if we could still match + // using the star + if ok && (len(t) == 0 || len(pattern) > 0) { + name = t + continue + } + if err != nil { + return false, err + } + if star { + // Look for match skipping i+1 bytes. + for i := 0; i < len(name); i++ { + t, ok, err := matchChunk(chunk, name[i+1:]) + if ok { + // if we're the last chunk, make sure we exhausted the name + if len(pattern) == 0 && len(t) > 0 { + continue + } + name = t + continue Pattern + } + if err != nil { + return false, err + } + } + } + // Before returning false with no error, + // check that the remainder of the pattern is syntactically valid. + for len(pattern) > 0 { + _, chunk, pattern = scanChunk(pattern) + if _, _, err := matchChunk(chunk, ""); err != nil { + return false, err + } + } + return false, nil + } + return len(name) == 0, nil +} + +// scanChunk gets the next segment of pattern, which is a non-star string +// possibly preceded by a star. 
+func scanChunk(pattern string) (star bool, chunk, rest string) { + for len(pattern) > 0 && pattern[0] == '*' { + pattern = pattern[1:] + star = true + } + inrange := false + var i int +Scan: + for i = 0; i < len(pattern); i++ { + switch pattern[i] { + case '\\': + // error check handled in matchChunk: bad pattern. + if i+1 < len(pattern) { + i++ + } + case '[': + inrange = true + case ']': + inrange = false + case '*': + if !inrange { + break Scan + } + } + } + return star, pattern[0:i], pattern[i:] +} + +// matchChunk checks whether chunk matches the beginning of s. +// If so, it returns the remainder of s (after the match). +// Chunk is all single-character operators: literals, char classes, and ?. +func matchChunk(chunk, s string) (rest string, ok bool, err error) { + // failed records whether the match has failed. + // After the match fails, the loop continues on processing chunk, + // checking that the pattern is well-formed but no longer reading s. + failed := false + for len(chunk) > 0 { + if !failed && len(s) == 0 { + failed = true + } + switch chunk[0] { + case '[': + // character class + var r rune + if !failed { + var n int + r, n = utf8.DecodeRuneInString(s) + s = s[n:] + } + chunk = chunk[1:] + // possibly negated + negated := false + if len(chunk) > 0 && chunk[0] == '^' { + negated = true + chunk = chunk[1:] + } + // parse all ranges + match := false + nrange := 0 + for { + if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { + chunk = chunk[1:] + break + } + var lo, hi rune + if lo, chunk, err = getEsc(chunk); err != nil { + return "", false, err + } + hi = lo + if chunk[0] == '-' { + if hi, chunk, err = getEsc(chunk[1:]); err != nil { + return "", false, err + } + } + if lo <= r && r <= hi { + match = true + } + nrange++ + } + if match == negated { + failed = true + } + + case '?': + if !failed { + _, n := utf8.DecodeRuneInString(s) + s = s[n:] + } + chunk = chunk[1:] + + case '\\': + chunk = chunk[1:] + if len(chunk) == 0 { + return "", false, errBadPattern + } + fallthrough + + default: + if !failed { + if chunk[0] != s[0] { + failed = true + } + s = s[1:] + } + chunk = chunk[1:] + } + } + if failed { + return "", false, nil + } + return s, true, nil +} + +// getEsc gets a possibly-escaped character from chunk, for a character class. +func getEsc(chunk string) (r rune, nchunk string, err error) { + if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { + err = errBadPattern + return + } + if chunk[0] == '\\' { + chunk = chunk[1:] + if len(chunk) == 0 { + err = errBadPattern + return + } + } + r, n := utf8.DecodeRuneInString(chunk) + if r == utf8.RuneError && n == 1 { + err = errBadPattern + } + nchunk = chunk[n:] + if len(nchunk) == 0 { + err = errBadPattern + } + return +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go new file mode 100644 index 00000000000..f56b784ea0c --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go @@ -0,0 +1,967 @@ +package in_toto + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "os" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" + "github.com/secure-systems-lab/go-securesystemslib/dsse" +) + +/* +KeyVal contains the actual values of a key, as opposed to key metadata such as +a key identifier or key type. For RSA keys, the key value is a pair of public +and private keys in PEM format stored as strings. 
For public keys
+the Private field may be an empty string.
+*/
+type KeyVal struct {
+	Private     string `json:"private,omitempty"`
+	Public      string `json:"public"`
+	Certificate string `json:"certificate,omitempty"`
+}
+
+/*
+Key represents a generic in-toto key that contains key metadata, such as an
+identifier, supported hash algorithms to create the identifier, the key type
+and the supported signature scheme, and the actual key value.
+*/
+type Key struct {
+	KeyID               string   `json:"keyid"`
+	KeyIDHashAlgorithms []string `json:"keyid_hash_algorithms"`
+	KeyType             string   `json:"keytype"`
+	KeyVal              KeyVal   `json:"keyval"`
+	Scheme              string   `json:"scheme"`
+}
+
+// ErrEmptyKeyField will be returned if a field in our Key struct is empty.
+var ErrEmptyKeyField = errors.New("empty field in key")
+
+// ErrInvalidHexString will be returned if a string is not a valid hex string.
+var ErrInvalidHexString = errors.New("invalid hex string")
+
+// ErrSchemeKeyTypeMismatch will be returned if the given scheme and key type are not supported together.
+var ErrSchemeKeyTypeMismatch = errors.New("the scheme and key type are not supported together")
+
+// ErrUnsupportedKeyIDHashAlgorithms will be returned if the specified KeyIDHashAlgorithms is not supported.
+var ErrUnsupportedKeyIDHashAlgorithms = errors.New("the given keyID hash algorithm is not supported")
+
+// ErrKeyKeyTypeMismatch will be returned if the specified keyType does not match the key.
+var ErrKeyKeyTypeMismatch = errors.New("the given key does not match its key type")
+
+// ErrNoPublicKey gets returned when the private key value is not empty.
+var ErrNoPublicKey = errors.New("the given key is not a public key")
+
+// ErrCurveSizeSchemeMismatch gets returned when the scheme and curve size are incompatible,
+// for example: curve size = "521" and scheme = "ecdsa-sha2-nistp224"
+var ErrCurveSizeSchemeMismatch = errors.New("the scheme does not match the curve size")
+
+/*
+matchEcdsaScheme checks if the scheme suffix matches the ecdsa key
+curve size. We do not need a full regex match here, because
+our validateKey functions are already checking for a valid scheme string.
+*/
+func matchEcdsaScheme(curveSize int, scheme string) error {
+	if !strings.HasSuffix(scheme, strconv.Itoa(curveSize)) {
+		return ErrCurveSizeSchemeMismatch
+	}
+	return nil
+}
+
+/*
+validateHexString is used to validate that a string passed to it contains
+only valid hexadecimal characters.
+*/
+func validateHexString(str string) error {
+	formatCheck, _ := regexp.MatchString("^[a-fA-F0-9]+$", str)
+	if !formatCheck {
+		return fmt.Errorf("%w: %s", ErrInvalidHexString, str)
+	}
+	return nil
+}
+
+/*
+validateKeyVal validates the KeyVal struct. In case of an ed25519 key,
+it will check for a hex string for private and public key. In any other
+case, validateKeyVal will try to decode the PEM block. If this succeeds,
+we have a valid PEM block in our KeyVal struct. On success it will return nil;
+on failure it will return the corresponding error. This can be either
+an ErrInvalidHexString, an ErrNoPEMBlock or an ErrUnsupportedKeyType
+if the KeyType is unknown.
+*/
+func validateKeyVal(key Key) error {
+	switch key.KeyType {
+	case ed25519KeyType:
+		// We cannot use matchPublicKeyKeyType or matchPrivateKeyKeyType here,
+		// because we do not retrieve the key from PEM. Hence we are dealing with
+		// plain ed25519 key bytes. These bytes can't be typechecked like in the
+		// matchKeyKeytype functions.
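+		// Informally: a plain ed25519 public key is 32 bytes, i.e. 64 hex
+		// characters; here we only check that the value is well-formed hex,
+		// not its length.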
+		err := validateHexString(key.KeyVal.Public)
+		if err != nil {
+			return err
+		}
+		if key.KeyVal.Private != "" {
+			err := validateHexString(key.KeyVal.Private)
+			if err != nil {
+				return err
+			}
+		}
+	case rsaKeyType, ecdsaKeyType:
+		// We do not need the pemData here, so we can throw it away via '_'
+		_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))
+		if err != nil {
+			return err
+		}
+		err = matchPublicKeyKeyType(parsedKey, key.KeyType)
+		if err != nil {
+			return err
+		}
+		if key.KeyVal.Private != "" {
+			// We do not need the pemData here, so we can throw it away via '_'
+			_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))
+			if err != nil {
+				return err
+			}
+			err = matchPrivateKeyKeyType(parsedKey, key.KeyType)
+			if err != nil {
+				return err
+			}
+		}
+	default:
+		return ErrUnsupportedKeyType
+	}
+	return nil
+}
+
+/*
+matchPublicKeyKeyType validates whether an interface can be asserted to
+the RSA or ECDSA public key type. We can only check RSA and ECDSA this way,
+because we are storing them in PEM format. Ed25519 keys are stored as plain
+ed25519 keys encoded as hex strings, thus we have no metadata for them.
+This function will return nil on success. If the key type does not match
+it will return an ErrKeyKeyTypeMismatch.
+*/
+func matchPublicKeyKeyType(key interface{}, keyType string) error {
+	switch key.(type) {
+	case *rsa.PublicKey:
+		if keyType != rsaKeyType {
+			return ErrKeyKeyTypeMismatch
+		}
+	case *ecdsa.PublicKey:
+		if keyType != ecdsaKeyType {
+			return ErrKeyKeyTypeMismatch
+		}
+	default:
+		return ErrInvalidKey
+	}
+	return nil
+}
+
+/*
+matchPrivateKeyKeyType validates whether an interface can be asserted to
+the RSA or ECDSA private key type. We can only check RSA and ECDSA this way,
+because we are storing them in PEM format. Ed25519 keys are stored as plain
+ed25519 keys encoded as hex strings, thus we have no metadata for them.
+This function will return nil on success. If the key type does not match
+it will return an ErrKeyKeyTypeMismatch.
+*/
+func matchPrivateKeyKeyType(key interface{}, keyType string) error {
+	// we can only check RSA and ECDSA this way, because we are storing them in PEM
+	// format. ed25519 keys are stored as plain ed25519 keys encoded as hex strings
+	// so we have no metadata for them.
+	switch key.(type) {
+	case *rsa.PrivateKey:
+		if keyType != rsaKeyType {
+			return ErrKeyKeyTypeMismatch
+		}
+	case *ecdsa.PrivateKey:
+		if keyType != ecdsaKeyType {
+			return ErrKeyKeyTypeMismatch
+		}
+	default:
+		return ErrInvalidKey
+	}
+	return nil
+}
+
+/*
+matchKeyTypeScheme checks if the specified scheme matches our specified
+keyType. If the keyType is not supported it will return an
+ErrUnsupportedKeyType. If the keyType and scheme do not match it will return
+an ErrSchemeKeyTypeMismatch. If the specified keyType and scheme are
+compatible matchKeyTypeScheme will return nil.
+*/
+func matchKeyTypeScheme(key Key) error {
+	switch key.KeyType {
+	case rsaKeyType:
+		for _, scheme := range getSupportedRSASchemes() {
+			if key.Scheme == scheme {
+				return nil
+			}
+		}
+	case ed25519KeyType:
+		for _, scheme := range getSupportedEd25519Schemes() {
+			if key.Scheme == scheme {
+				return nil
+			}
+		}
+	case ecdsaKeyType:
+		for _, scheme := range getSupportedEcdsaSchemes() {
+			if key.Scheme == scheme {
+				return nil
+			}
+		}
+	default:
+		return fmt.Errorf("%w: %s", ErrUnsupportedKeyType, key.KeyType)
+	}
+	return ErrSchemeKeyTypeMismatch
+}
+
+/*
+validateKey checks the outer key object (everything, except the KeyVal struct).
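+The KeyVal contents themselves are validated separately by validateKeyVal.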
+validateKey verifies the keyID for being a hex string and checks for empty fields.
+On success it will return nil, on error it will return the corresponding error.
+Either: ErrEmptyKeyField or ErrInvalidHexString.
+*/
+func validateKey(key Key) error {
+	err := validateHexString(key.KeyID)
+	if err != nil {
+		return err
+	}
+	// This could probably be done more elegantly with reflection,
+	// but we care about performance, don't we?!
+	if key.KeyType == "" {
+		return fmt.Errorf("%w: keytype", ErrEmptyKeyField)
+	}
+	if key.KeyVal.Public == "" && key.KeyVal.Certificate == "" {
+		return fmt.Errorf("%w: keyval.public and keyval.certificate cannot both be blank", ErrEmptyKeyField)
+	}
+	if key.Scheme == "" {
+		return fmt.Errorf("%w: scheme", ErrEmptyKeyField)
+	}
+	err = matchKeyTypeScheme(key)
+	if err != nil {
+		return err
+	}
+	// only check for supported KeyIDHashAlgorithms, if the variable has been set
+	if key.KeyIDHashAlgorithms != nil {
+		supportedKeyIDHashAlgorithms := getSupportedKeyIDHashAlgorithms()
+		if !supportedKeyIDHashAlgorithms.IsSubSet(NewSet(key.KeyIDHashAlgorithms...)) {
+			return fmt.Errorf("%w: %#v, supported are: %#v", ErrUnsupportedKeyIDHashAlgorithms, key.KeyIDHashAlgorithms, getSupportedKeyIDHashAlgorithms())
+		}
+	}
+	return nil
+}
+
+/*
+validatePublicKey is a wrapper around validateKey. It tests whether the private
+key value in the key is empty and then validates the key by calling validateKey.
+On success it will return nil, on error it will return an ErrNoPublicKey error.
+*/
+func validatePublicKey(key Key) error {
+	if key.KeyVal.Private != "" {
+		return ErrNoPublicKey
+	}
+	err := validateKey(key)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+/*
+Signature represents a generic in-toto signature that contains the identifier
+of the Key, which was used to create the signature and the signature data. The
+used signature scheme is found in the corresponding Key.
+*/
+type Signature struct {
+	KeyID       string `json:"keyid"`
+	Sig         string `json:"sig"`
+	Certificate string `json:"cert,omitempty"`
+}
+
+// GetCertificate returns the parsed x509 certificate attached to the signature,
+// if it exists.
+func (sig Signature) GetCertificate() (Key, error) {
+	key := Key{}
+	if len(sig.Certificate) == 0 {
+		return key, errors.New("Signature has empty Certificate")
+	}
+
+	err := key.LoadKeyReaderDefaults(strings.NewReader(sig.Certificate))
+	return key, err
+}
+
+/*
+validateSignature is a function used to check if a passed signature is valid,
+by inspecting the key ID and the signature itself.
+*/
+func validateSignature(signature Signature) error {
+	if err := validateHexString(signature.KeyID); err != nil {
+		return err
+	}
+	if err := validateHexString(signature.Sig); err != nil {
+		return err
+	}
+	return nil
+}
+
+/*
+validateSliceOfSignatures is a helper function used to validate multiple
+signatures stored in a slice.
+*/
+func validateSliceOfSignatures(slice []Signature) error {
+	for _, signature := range slice {
+		if err := validateSignature(signature); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+/*
+Link represents the evidence of a supply chain step performed by a functionary.
+It should be contained in a generic Metablock object, which provides
+functionality for signing and signature verification, and reading from and
+writing to disk.
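+
+A minimal link payload, with hypothetical names and abbreviated digests,
+might look like this:
+
+	{
+		"_type": "link",
+		"name": "package",
+		"materials": {"foo.py": {"sha256": "abc..."}},
+		"products": {"foo.tar.gz": {"sha256": "def..."}},
+		"byproducts": {"return-value": 0},
+		"command": ["tar", "czf", "foo.tar.gz", "foo.py"],
+		"environment": {}
+	}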
+*/
+type Link struct {
+	Type        string                 `json:"_type"`
+	Name        string                 `json:"name"`
+	Materials   map[string]interface{} `json:"materials"`
+	Products    map[string]interface{} `json:"products"`
+	ByProducts  map[string]interface{} `json:"byproducts"`
+	Command     []string               `json:"command"`
+	Environment map[string]interface{} `json:"environment"`
+}
+
+/*
+validateArtifacts is a general function used to validate products and materials.
+*/
+func validateArtifacts(artifacts map[string]interface{}) error {
+	for artifactName, artifact := range artifacts {
+		artifactValue := reflect.ValueOf(artifact).MapRange()
+		for artifactValue.Next() {
+			value := artifactValue.Value().Interface().(string)
+			hashType := artifactValue.Key().Interface().(string)
+			if err := validateHexString(value); err != nil {
+				return fmt.Errorf("in artifact '%s', %s hash value: %s",
+					artifactName, hashType, err.Error())
+			}
+		}
+	}
+	return nil
+}
+
+/*
+validateLink is a function used to ensure that a passed item of type Link
+matches the necessary format.
+*/
+func validateLink(link Link) error {
+	if link.Type != "link" {
+		return fmt.Errorf("invalid type for link '%s': should be 'link'",
+			link.Name)
+	}
+
+	if err := validateArtifacts(link.Materials); err != nil {
+		return fmt.Errorf("in materials of link '%s': %s", link.Name,
+			err.Error())
+	}
+
+	if err := validateArtifacts(link.Products); err != nil {
+		return fmt.Errorf("in products of link '%s': %s", link.Name,
+			err.Error())
+	}
+
+	return nil
+}
+
+/*
+LinkNameFormat represents a format string used to create the filename for a
+signed Link (wrapped in a Metablock). It consists of the name of the link and
+the first 8 characters of the signing key id. E.g.:
+
+	fmt.Sprintf(LinkNameFormat, "package",
+		"2f89b9272acfc8f4a0a0f094d789fdb0ba798b0fe41f2f5f417c12f0085ff498")
+	// returns "package.2f89b927.link"
+*/
+const LinkNameFormat = "%s.%.8s.link"
+const PreliminaryLinkNameFormat = ".%s.%.8s.link-unfinished"
+
+/*
+LinkNameFormatShort is for links that are not signed, e.g.:
+
+	fmt.Sprintf(LinkNameFormatShort, "unsigned")
+	// returns "unsigned.link"
+*/
+const LinkNameFormatShort = "%s.link"
+const LinkGlobFormat = "%s.????????.link"
+
+/*
+SublayoutLinkDirFormat represents the format of the name of the directory for
+sublayout links during the verification workflow.
+*/
+const SublayoutLinkDirFormat = "%s.%.8s"
+
+/*
+SupplyChainItem summarizes common fields of the two available supply chain
+item types, Inspection and Step.
+*/
+type SupplyChainItem struct {
+	Name              string     `json:"name"`
+	ExpectedMaterials [][]string `json:"expected_materials"`
+	ExpectedProducts  [][]string `json:"expected_products"`
+}
+
+/*
+validateArtifactRule calls UnpackRule to validate that the passed rule conforms
+with any of the available rule formats.
+*/
+func validateArtifactRule(rule []string) error {
+	if _, err := UnpackRule(rule); err != nil {
+		return err
+	}
+	return nil
+}
+
+/*
+validateSliceOfArtifactRules iterates over passed rules to validate them.
+*/
+func validateSliceOfArtifactRules(rules [][]string) error {
+	for _, rule := range rules {
+		if err := validateArtifactRule(rule); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+/*
+validateSupplyChainItem is used to validate the common elements found in both
+steps and inspections. Here, the function primarily ensures that the name of
+a supply chain item isn't empty.
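+It also unpacks every rule in ExpectedMaterials and ExpectedProducts via
+UnpackRule; for example (hypothetical rule), ["ALLOW", "src/*"] is accepted,
+while ["FOO", "bar"] is rejected.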
+*/
+func validateSupplyChainItem(item SupplyChainItem) error {
+	if item.Name == "" {
+		return fmt.Errorf("name cannot be empty")
+	}
+
+	if err := validateSliceOfArtifactRules(item.ExpectedMaterials); err != nil {
+		return fmt.Errorf("invalid material rule: %s", err)
+	}
+	if err := validateSliceOfArtifactRules(item.ExpectedProducts); err != nil {
+		return fmt.Errorf("invalid product rule: %s", err)
+	}
+	return nil
+}
+
+/*
+Inspection represents an in-toto supply chain inspection, whose command in the
+Run field is executed during final product verification, generating unsigned
+link metadata. Materials and products used/produced by the inspection are
+constrained by the artifact rules in the inspection's ExpectedMaterials and
+ExpectedProducts fields.
+*/
+type Inspection struct {
+	Type string   `json:"_type"`
+	Run  []string `json:"run"`
+	SupplyChainItem
+}
+
+/*
+validateInspection ensures that a passed inspection is valid and matches the
+necessary format of an inspection.
+*/
+func validateInspection(inspection Inspection) error {
+	if err := validateSupplyChainItem(inspection.SupplyChainItem); err != nil {
+		return fmt.Errorf("inspection %s", err.Error())
+	}
+	if inspection.Type != "inspection" {
+		return fmt.Errorf("invalid Type value for inspection '%s': should be "+
+			"'inspection'", inspection.SupplyChainItem.Name)
+	}
+	return nil
+}
+
+/*
+Step represents an in-toto step of the supply chain performed by a functionary.
+During final product verification in-toto looks for corresponding Link
+metadata, which is used as signed evidence that the step was performed
+according to the supply chain definition. Materials and products used/produced
+by the step are constrained by the artifact rules in the step's
+ExpectedMaterials and ExpectedProducts fields.
+*/
+type Step struct {
+	Type                   string                  `json:"_type"`
+	PubKeys                []string                `json:"pubkeys"`
+	CertificateConstraints []CertificateConstraint `json:"cert_constraints,omitempty"`
+	ExpectedCommand        []string                `json:"expected_command"`
+	Threshold              int                     `json:"threshold"`
+	SupplyChainItem
+}
+
+// CheckCertConstraints returns nil if the provided certificate matches at least
+// one of the constraints for this step, and an error otherwise.
+func (s Step) CheckCertConstraints(key Key, rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) error {
+	if len(s.CertificateConstraints) == 0 {
+		return fmt.Errorf("no constraints found")
+	}
+
+	_, possibleCert, err := decodeAndParse([]byte(key.KeyVal.Certificate))
+	if err != nil {
+		return err
+	}
+
+	cert, ok := possibleCert.(*x509.Certificate)
+	if !ok {
+		return fmt.Errorf("not a valid certificate")
+	}
+
+	for _, constraint := range s.CertificateConstraints {
+		err = constraint.Check(cert, rootCAIDs, rootCertPool, intermediateCertPool)
+		if err == nil {
+			return nil
+		}
+	}
+	if err != nil {
+		return err
+	}
+
+	// this should not be reachable since there is at least one constraint, and the for loop only saw err != nil
+	return fmt.Errorf("unknown certificate constraint error")
+}
+
+/*
+validateStep ensures that a passed step is valid and matches the
+necessary format of a step.
+*/
+func validateStep(step Step) error {
+	if err := validateSupplyChainItem(step.SupplyChainItem); err != nil {
+		return fmt.Errorf("step %s", err.Error())
+	}
+	if step.Type != "step" {
+		return fmt.Errorf("invalid Type value for step '%s': should be 'step'",
+			step.SupplyChainItem.Name)
+	}
+	for _, keyID := range step.PubKeys {
+		if err := validateHexString(keyID); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+/*
+ISO8601DateSchema defines the format string of a timestamp following the
+ISO 8601 standard.
+*/
+const ISO8601DateSchema = "2006-01-02T15:04:05Z"
+
+/*
+Layout represents the definition of a software supply chain. It lists the
+sequence of steps required in the software supply chain and the functionaries
+authorized to perform these steps. Functionaries are identified by their
+public keys. In addition, the layout may list a sequence of inspections that
+are executed during in-toto supply chain verification. A layout should be
+contained in a generic Metablock object, which provides functionality for
+signing and signature verification, and reading from and writing to disk.
+*/
+type Layout struct {
+	Type            string         `json:"_type"`
+	Steps           []Step         `json:"steps"`
+	Inspect         []Inspection   `json:"inspect"`
+	Keys            map[string]Key `json:"keys"`
+	RootCas         map[string]Key `json:"rootcas,omitempty"`
+	IntermediateCas map[string]Key `json:"intermediatecas,omitempty"`
+	Expires         string         `json:"expires"`
+	Readme          string         `json:"readme"`
+}
+
+// Go does not allow passing `[]T` (a slice with a concrete type) to a function
+// that accepts `[]interface{}` (a slice with a generic type).
+// We have to manually create the interface slice first, see
+// https://golang.org/doc/faq#convert_slice_of_interface
+// TODO: Is there a better way to do polymorphism for steps and inspections?
+func (l *Layout) stepsAsInterfaceSlice() []interface{} {
+	stepsI := make([]interface{}, len(l.Steps))
+	for i, v := range l.Steps {
+		stepsI[i] = v
+	}
+	return stepsI
+}
+func (l *Layout) inspectAsInterfaceSlice() []interface{} {
+	inspectionsI := make([]interface{}, len(l.Inspect))
+	for i, v := range l.Inspect {
+		inspectionsI[i] = v
+	}
+	return inspectionsI
+}
+
+// RootCAIDs returns a slice of all of the Root CA IDs
+func (l *Layout) RootCAIDs() []string {
+	rootCAIDs := make([]string, 0, len(l.RootCas))
+	for rootCAID := range l.RootCas {
+		rootCAIDs = append(rootCAIDs, rootCAID)
+	}
+	return rootCAIDs
+}
+
+func validateLayoutKeys(keys map[string]Key) error {
+	for keyID, key := range keys {
+		if key.KeyID != keyID {
+			return fmt.Errorf("invalid key found")
+		}
+		err := validatePublicKey(key)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+/*
+validateLayout is a function used to ensure that a passed item of type Layout
+matches the necessary format.
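+Concretely, it checks the _type value, that the expiry date parses, that all
+listed keys are valid public keys, and that step and inspection names are
+unique.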
+*/ +func validateLayout(layout Layout) error { + if layout.Type != "layout" { + return fmt.Errorf("invalid Type value for layout: should be 'layout'") + } + + if _, err := time.Parse(ISO8601DateSchema, layout.Expires); err != nil { + return fmt.Errorf("expiry time parsed incorrectly - date either" + + " invalid or of incorrect format") + } + + if err := validateLayoutKeys(layout.Keys); err != nil { + return err + } + + if err := validateLayoutKeys(layout.RootCas); err != nil { + return err + } + + if err := validateLayoutKeys(layout.IntermediateCas); err != nil { + return err + } + + var namesSeen = make(map[string]bool) + for _, step := range layout.Steps { + if namesSeen[step.Name] { + return fmt.Errorf("non unique step or inspection name found") + } + + namesSeen[step.Name] = true + + if err := validateStep(step); err != nil { + return err + } + } + for _, inspection := range layout.Inspect { + if namesSeen[inspection.Name] { + return fmt.Errorf("non unique step or inspection name found") + } + + namesSeen[inspection.Name] = true + } + return nil +} + +type Metadata interface { + Sign(Key) error + VerifySignature(Key) error + GetPayload() any + Sigs() []Signature + GetSignatureForKeyID(string) (Signature, error) + Dump(string) error +} + +func LoadMetadata(path string) (Metadata, error) { + jsonBytes, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var rawData map[string]*json.RawMessage + if err := json.Unmarshal(jsonBytes, &rawData); err != nil { + return nil, err + } + + if _, ok := rawData["payloadType"]; ok { + dsseEnv := &dsse.Envelope{} + if rawData["payload"] == nil || rawData["signatures"] == nil { + return nil, fmt.Errorf("in-toto metadata envelope requires 'payload' and 'signatures' parts") + } + + if err := json.Unmarshal(jsonBytes, dsseEnv); err != nil { + return nil, err + } + + if dsseEnv.PayloadType != PayloadType { + return nil, ErrInvalidPayloadType + } + + return loadEnvelope(dsseEnv) + } + + mb := &Metablock{} + + // Error out on missing `signed` or `signatures` field or if + // one of them has a `null` value, which would lead to a nil pointer + // dereference in Unmarshal below. + if rawData["signed"] == nil || rawData["signatures"] == nil { + return nil, fmt.Errorf("in-toto metadata requires 'signed' and 'signatures' parts") + } + + // Fully unmarshal signatures part + if err := json.Unmarshal(*rawData["signatures"], &mb.Signatures); err != nil { + return nil, err + } + + payload, err := loadPayload(*rawData["signed"]) + if err != nil { + return nil, err + } + + mb.Signed = payload + + return mb, nil +} + +/* +Metablock is a generic container for signable in-toto objects such as Layout +or Link. It has two fields, one that contains the signable object and one that +contains corresponding signatures. Metablock also provides functionality for +signing and signature verification, and reading from and writing to disk. +*/ +type Metablock struct { + // NOTE: Whenever we want to access an attribute of `Signed` we have to + // perform type assertion, e.g. `metablock.Signed.(Layout).Keys` + // Maybe there is a better way to store either Layouts or Links in `Signed`? 
+	// The notary folks seem to have separate container structs:
+	// https://github.com/theupdateframework/notary/blob/master/tuf/data/root.go#L10-L14
+	// https://github.com/theupdateframework/notary/blob/master/tuf/data/targets.go#L13-L17
+	// I implemented it this way, because there will be several functions that
+	// receive or return a Metablock, where the type of Signed has to be inferred
+	// on runtime, e.g. when iterating over links for a layout, and a link can
+	// turn out to be a layout (sublayout)
+	Signed     interface{} `json:"signed"`
+	Signatures []Signature `json:"signatures"`
+}
+
+type jsonField struct {
+	name      string
+	omitempty bool
+}
+
+/*
+checkRequiredJSONFields checks that the passed map (obj) has keys for each of
+the json tags in the passed struct type (typ), and returns an error otherwise.
+Any json tags that contain the "omitempty" option are treated as optional.
+*/
+func checkRequiredJSONFields(obj map[string]interface{},
+	typ reflect.Type) error {
+
+	// Create list of json tags, e.g. `json:"_type"`
+	attributeCount := typ.NumField()
+	allFields := make([]jsonField, 0)
+	for i := 0; i < attributeCount; i++ {
+		fieldStr := typ.Field(i).Tag.Get("json")
+		field := jsonField{
+			name:      fieldStr,
+			omitempty: false,
+		}
+
+		if idx := strings.Index(fieldStr, ","); idx != -1 {
+			field.name = fieldStr[:idx]
+			field.omitempty = strings.Contains(fieldStr[idx+1:], "omitempty")
+		}
+
+		allFields = append(allFields, field)
+	}
+
+	// Assert that there's a key in the passed map for each tag
+	for _, field := range allFields {
+		if _, ok := obj[field.name]; !ok && !field.omitempty {
+			return fmt.Errorf("required field %s missing", field.name)
+		}
+	}
+	return nil
+}
+
+/*
+Load parses JSON formatted metadata at the passed path into the Metablock
+object on which it was called. It returns an error if it cannot parse
+a valid JSON formatted Metablock that contains a Link or Layout.
+
+Deprecated: Use LoadMetadata for a signature wrapper agnostic way to load an
+envelope.
+*/
+func (mb *Metablock) Load(path string) error {
+	// Read entire file
+	jsonBytes, err := os.ReadFile(path)
+	if err != nil {
+		return err
+	}
+
+	// Unmarshal JSON into a map of raw messages (signed and signatures)
+	// We can't fully unmarshal immediately, because we need to inspect the
+	// type (link or layout) to decide which data structure to use
+	var rawMb map[string]*json.RawMessage
+	if err := json.Unmarshal(jsonBytes, &rawMb); err != nil {
+		return err
+	}
+
+	// Error out on missing `signed` or `signatures` field or if
+	// one of them has a `null` value, which would lead to a nil pointer
+	// dereference in Unmarshal below.
+	if rawMb["signed"] == nil || rawMb["signatures"] == nil {
+		return fmt.Errorf("in-toto metadata requires 'signed' and" +
+			" 'signatures' parts")
+	}
+
+	// Fully unmarshal signatures part
+	if err := json.Unmarshal(*rawMb["signatures"], &mb.Signatures); err != nil {
+		return err
+	}
+
+	payload, err := loadPayload(*rawMb["signed"])
+	if err != nil {
+		return err
+	}
+
+	mb.Signed = payload
+
+	return nil
+}
+
+/*
+Dump JSON serializes and writes the Metablock on which it was called to the
+passed path. It returns an error if JSON serialization or writing fails.
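+
+A sketch of its use, with a hypothetical file name:
+
+	var mb Metablock
+	// ... populate mb.Signed and mb.Signatures ...
+	if err := mb.Dump("package.2f89b927.link"); err != nil {
+		// handle error
+	}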
+*/
+func (mb *Metablock) Dump(path string) error {
+	// JSON encode Metablock formatted with newlines and indentation
+	// TODO: parametrize format
+	jsonBytes, err := json.MarshalIndent(mb, "", " ")
+	if err != nil {
+		return err
+	}
+
+	// Write JSON bytes to the passed path with permissions (-rw-r--r--)
+	err = os.WriteFile(path, jsonBytes, 0644)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+/*
+GetSignableRepresentation returns the canonical JSON representation of the
+Signed field of the Metablock on which it was called. If canonicalization
+fails the first return value is nil and the second return value is the error.
+*/
+func (mb *Metablock) GetSignableRepresentation() ([]byte, error) {
+	return cjson.EncodeCanonical(mb.Signed)
+}
+
+func (mb *Metablock) GetPayload() any {
+	return mb.Signed
+}
+
+func (mb *Metablock) Sigs() []Signature {
+	return mb.Signatures
+}
+
+/*
+VerifySignature verifies the first signature, corresponding to the passed Key,
+that it finds in the Signatures field of the Metablock on which it was called.
+It returns an error if Signatures does not contain a Signature corresponding to
+the passed Key, the object in Signed cannot be canonicalized, or the Signature
+is invalid.
+*/
+func (mb *Metablock) VerifySignature(key Key) error {
+	sig, err := mb.GetSignatureForKeyID(key.KeyID)
+	if err != nil {
+		return err
+	}
+
+	dataCanonical, err := mb.GetSignableRepresentation()
+	if err != nil {
+		return err
+	}
+
+	if err := VerifySignature(key, sig, dataCanonical); err != nil {
+		return err
+	}
+	return nil
+}
+
+// GetSignatureForKeyID returns the signature that was created by the key with
+// the provided keyID, if it exists.
+func (mb *Metablock) GetSignatureForKeyID(keyID string) (Signature, error) {
+	for _, s := range mb.Signatures {
+		if s.KeyID == keyID {
+			return s, nil
+		}
+	}
+
+	return Signature{}, fmt.Errorf("no signature found for key '%s'", keyID)
+}
+
+/*
+ValidateMetablock ensures that a passed Metablock object is valid. It indirectly
+validates the Link or Layout that the Metablock object contains.
+*/
+func ValidateMetablock(mb Metablock) error {
+	switch mbSignedType := mb.Signed.(type) {
+	case Layout:
+		if err := validateLayout(mb.Signed.(Layout)); err != nil {
+			return err
+		}
+	case Link:
+		if err := validateLink(mb.Signed.(Link)); err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("unknown type '%s', should be 'layout' or 'link'",
+			mbSignedType)
+	}
+
+	if err := validateSliceOfSignatures(mb.Signatures); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+/*
+Sign creates a signature over the signed portion of the metablock using the Key
+object provided. It then appends the resulting signature to the Signatures
+field. It returns an error if the Signed object cannot be
+canonicalized, or if the key is invalid or not supported.
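+
+A sketch of a typical signing flow, assuming a hypothetical key file:
+
+	var key Key
+	if err := key.LoadKeyDefaults("alice.pem"); err != nil {
+		// handle error
+	}
+	if err := mb.Sign(key); err != nil {
+		// handle error
+	}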
+*/
+func (mb *Metablock) Sign(key Key) error {
+
+	dataCanonical, err := mb.GetSignableRepresentation()
+	if err != nil {
+		return err
+	}
+
+	newSignature, err := GenerateSignature(dataCanonical, key)
+	if err != nil {
+		return err
+	}
+
+	mb.Signatures = append(mb.Signatures, newSignature)
+	return nil
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go
new file mode 100644
index 00000000000..1bba77c39e5
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go
@@ -0,0 +1,131 @@
+package in_toto
+
+import (
+	"fmt"
+	"strings"
+)
+
+// An error message issued in UnpackRule if it receives a malformed rule.
+var errorMsg = "Wrong rule format, available formats are:\n" +
+	"\tMATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)" +
+	" [IN <destination-path-prefix>] FROM <step>,\n" +
+	"\tCREATE <pattern>,\n" +
+	"\tDELETE <pattern>,\n" +
+	"\tMODIFY <pattern>,\n" +
+	"\tALLOW <pattern>,\n" +
+	"\tDISALLOW <pattern>,\n" +
+	"\tREQUIRE <filename>\n\n"
+
+/*
+UnpackRule parses the passed rule and extracts and returns the information
+required for rule processing. It can be used to verify if a rule has a valid
+format. Available rule formats are:
+
+	MATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)
+		[IN <destination-path-prefix>] FROM <step>,
+	CREATE <pattern>,
+	DELETE <pattern>,
+	MODIFY <pattern>,
+	ALLOW <pattern>,
+	DISALLOW <pattern>
+
+Rule tokens are normalized to lower case before returning. The returned map
+has the following format:
+
+	{
+		"type": "match" | "create" | "delete" | "modify" | "allow" | "disallow",
+		"pattern": "<pattern>",
+		"srcPrefix": "<source-path-prefix>", // MATCH rule only
+		"dstPrefix": "<destination-path-prefix>", // MATCH rule only
+		"dstType": "materials" | "products", // MATCH rule only
+		"dstName": "<step>", // MATCH rule only
+	}
+
+If the rule does not match any of the available formats the first return value
+is nil and the second return value is the error.
+*/
+func UnpackRule(rule []string) (map[string]string, error) {
+	// Cache rule len
+	ruleLen := len(rule)
+
+	// Create all lower rule copy to case-insensitively parse out tokens whose
+	// position we don't know yet. We keep the original rule to retain the
+	// non-token elements' case.
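+	// For example (informal), UnpackRule([]string{"CREATE", "foo.tar.gz"})
+	// returns {"type": "create", "pattern": "foo.tar.gz"}.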
+	ruleLower := make([]string, ruleLen)
+	for i, val := range rule {
+		ruleLower[i] = strings.ToLower(val)
+	}
+
+	switch ruleLower[0] {
+	case "create", "modify", "delete", "allow", "disallow", "require":
+		if ruleLen != 2 {
+			return nil,
+				fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
+		}
+
+		return map[string]string{
+			"type":    ruleLower[0],
+			"pattern": rule[1],
+		}, nil
+
+	case "match":
+		var srcPrefix string
+		var dstType string
+		var dstPrefix string
+		var dstName string
+
+		// MATCH <pattern> IN <source-path-prefix> WITH (MATERIALS|PRODUCTS) \
+		// IN <destination-path-prefix> FROM <step>
+		if ruleLen == 10 && ruleLower[2] == "in" &&
+			ruleLower[4] == "with" && ruleLower[6] == "in" &&
+			ruleLower[8] == "from" {
+			srcPrefix = rule[3]
+			dstType = ruleLower[5]
+			dstPrefix = rule[7]
+			dstName = rule[9]
+			// MATCH <pattern> IN <source-path-prefix> WITH (MATERIALS|PRODUCTS) \
+			// FROM <step>
+		} else if ruleLen == 8 && ruleLower[2] == "in" &&
+			ruleLower[4] == "with" && ruleLower[6] == "from" {
+			srcPrefix = rule[3]
+			dstType = ruleLower[5]
+			dstPrefix = ""
+			dstName = rule[7]
+
+			// MATCH <pattern> WITH (MATERIALS|PRODUCTS) IN <destination-path-prefix>
+			// FROM <step>
+		} else if ruleLen == 8 && ruleLower[2] == "with" &&
+			ruleLower[4] == "in" && ruleLower[6] == "from" {
+			srcPrefix = ""
+			dstType = ruleLower[3]
+			dstPrefix = rule[5]
+			dstName = rule[7]
+
+			// MATCH <pattern> WITH (MATERIALS|PRODUCTS) FROM <step>
+		} else if ruleLen == 6 && ruleLower[2] == "with" &&
+			ruleLower[4] == "from" {
+			srcPrefix = ""
+			dstType = ruleLower[3]
+			dstPrefix = ""
+			dstName = rule[5]
+
+		} else {
+			return nil,
+				fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
+
+		}
+
+		return map[string]string{
+			"type":      ruleLower[0],
+			"pattern":   rule[1],
+			"srcPrefix": srcPrefix,
+			"dstPrefix": dstPrefix,
+			"dstType":   dstType,
+			"dstName":   dstName,
+		}, nil
+
+	default:
+		return nil,
+			fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
+	}
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go
new file mode 100644
index 00000000000..f0a55d82199
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go
@@ -0,0 +1,462 @@
+package in_toto
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"syscall"
+
+	"github.com/shibumi/go-pathspec"
+)
+
+// ErrSymCycle signals a detected symlink cycle in our RecordArtifacts() function.
+var ErrSymCycle = errors.New("symlink cycle detected")
+
+// ErrUnsupportedHashAlgorithm signals a missing hash mapping in getHashMapping
+var ErrUnsupportedHashAlgorithm = errors.New("unsupported hash algorithm detected")
+
+var ErrEmptyCommandArgs = errors.New("the command args are empty")
+
+// visitedSymlinks is a hashset that contains all paths that we have visited.
+var visitedSymlinks Set
+
+/*
+RecordArtifact reads and hashes the contents of the file at the passed path
+using sha256 and returns a map in the following format:
+
+	{
+		"<path>": {
+			"sha256": <hex representation of hash>
+		}
+	}
+
+If reading the file fails, the first return value is nil and the second return
+value is the error.
+NOTE: For cross-platform consistency Windows-style line separators (CRLF) are
+normalized to Unix-style line separators (LF) before hashing file contents.
+*/
+func RecordArtifact(path string, hashAlgorithms []string, lineNormalization bool) (map[string]interface{}, error) {
+	supportedHashMappings := getHashMapping()
+	// Read file from passed path
+	contents, err := os.ReadFile(path)
+	hashedContentsMap := make(map[string]interface{})
+	if err != nil {
+		return nil, err
+	}
+
+	if lineNormalization {
+		// "Normalize" file contents. We convert all line separators to '\n'
+		// for keeping operating system independence
+		contents = bytes.ReplaceAll(contents, []byte("\r\n"), []byte("\n"))
+		contents = bytes.ReplaceAll(contents, []byte("\r"), []byte("\n"))
+	}
+
+	// Create a map of all the hashes present in the hash_func list
+	for _, element := range hashAlgorithms {
+		if _, ok := supportedHashMappings[element]; !ok {
+			return nil, fmt.Errorf("%w: %s", ErrUnsupportedHashAlgorithm, element)
+		}
+		h := supportedHashMappings[element]
+		result := fmt.Sprintf("%x", hashToHex(h(), contents))
+		hashedContentsMap[element] = result
+	}
+
+	// Return it in a format that is conformant with link metadata artifacts
+	return hashedContentsMap, nil
+}
+
+/*
+RecordArtifacts is a wrapper around recordArtifacts.
+RecordArtifacts initializes a set for storing visited symlinks,
+calls recordArtifacts and deletes the set if no longer needed.
+recordArtifacts walks through the passed slice of paths, traversing
+subdirectories, and calls RecordArtifact for each file. It returns a map in
+the following format:
+
+	{
+		"<path>": {
+			"sha256": <hex representation of hash>
+		},
+		"<path>": {
+			"sha256": <hex representation of hash>
+		},
+		...
+	}
+
+If recording an artifact fails the first return value is nil and the second
+return value is the error.
+*/
+func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool) (evalArtifacts map[string]interface{}, err error) {
+	// Make sure to initialize a fresh hashset for every RecordArtifacts call
+	visitedSymlinks = NewSet()
+	evalArtifacts, err = recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs)
+	// pass result and error through
+	return evalArtifacts, err
+}
+
+/*
+recordArtifacts walks through the passed slice of paths, traversing
+subdirectories, and calls RecordArtifact for each file. It returns a map in
+the following format:
+
+	{
+		"<path>": {
+			"sha256": <hex representation of hash>
+		},
+		"<path>": {
+			"sha256": <hex representation of hash>
+		},
+		...
+	}
+
+If recording an artifact fails the first return value is nil and the second
+return value is the error.
+*/
+func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool) (map[string]interface{}, error) {
+	artifacts := make(map[string]interface{})
+	for _, path := range paths {
+		err := filepath.Walk(path,
+			func(path string, info os.FileInfo, err error) error {
+				// Abort if Walk function has a problem,
+				// e.g. path does not exist
+				if err != nil {
+					return err
+				}
+				// We need to call pathspec.GitIgnore inside of our filepath.Walk, because otherwise
+				// we will not catch all paths. Just imagine a path like "." and a pattern like "*.pub".
+				// If we called pathspec outside of the filepath.Walk, this would not match.
+				ignore, err := pathspec.GitIgnore(gitignorePatterns, path)
+				if err != nil {
+					return err
+				}
+				if ignore {
+					return nil
+				}
+				// Don't hash directories
+				if info.IsDir() {
+					return nil
+				}
+
+				// check for symlink and evaluate the last element in a symlink
+				// chain via filepath.EvalSymlinks. We use EvalSymlinks here,
+				// because with os.Readlink() we would just read the next
+				// element in a possible symlink chain. This would mean more
+				// iterations. info.Mode()&os.ModeSymlink uses the file
+				// type bitmask to check for a symlink.
+ if info.Mode()&os.ModeSymlink == os.ModeSymlink { + // return with error if we detect a symlink cycle + if ok := visitedSymlinks.Has(path); ok { + // this error will get passed through + // to RecordArtifacts() + return ErrSymCycle + } + evalSym, err := filepath.EvalSymlinks(path) + if err != nil { + return err + } + info, err := os.Stat(evalSym) + if err != nil { + return err + } + targetIsDir := false + if info.IsDir() { + if !followSymlinkDirs { + // We don't follow symlinked directories + return nil + } + targetIsDir = true + } + // add symlink to visitedSymlinks set + // this way, we know which link we have visited already + // if we visit a symlink twice, we have detected a symlink cycle + visitedSymlinks.Add(path) + // We recursively call recordArtifacts() to follow + // the new path. + evalArtifacts, evalErr := recordArtifacts([]string{evalSym}, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) + if evalErr != nil { + return evalErr + } + for key, value := range evalArtifacts { + if targetIsDir { + symlinkPath := filepath.Join(path, strings.TrimPrefix(key, evalSym)) + artifacts[symlinkPath] = value + } else { + artifacts[path] = value + } + } + return nil + } + artifact, err := RecordArtifact(path, hashAlgorithms, lineNormalization) + // Abort if artifact can't be recorded, e.g. + // due to file permissions + if err != nil { + return err + } + + for _, strip := range lStripPaths { + if strings.HasPrefix(path, strip) { + path = strings.TrimPrefix(path, strip) + break + } + } + // Check if path is unique + if _, exists := artifacts[path]; exists { + return fmt.Errorf("left stripping has resulted in non unique dictionary key: %s", path) + } + artifacts[path] = artifact + return nil + }) + + if err != nil { + return nil, err + } + } + + return artifacts, nil +} + +/* +waitErrToExitCode converts an error returned by Cmd.wait() to an exit code. It +returns -1 if no exit code can be inferred. +*/ +func waitErrToExitCode(err error) int { + // If there's no exit code, we return -1 + retVal := -1 + + // See https://stackoverflow.com/questions/10385551/get-exit-code-go + if err != nil { + if exiterr, ok := err.(*exec.ExitError); ok { + // The program has exited with an exit code != 0 + // This works on both Unix and Windows. Although package + // syscall is generally platform dependent, WaitStatus is + // defined for both Unix and Windows and in both cases has + // an ExitStatus() method with the same signature. + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + retVal = status.ExitStatus() + } + } + } else { + retVal = 0 + } + + return retVal +} + +/* +RunCommand executes the passed command in a subprocess. The first element of +cmdArgs is used as executable and the rest as command arguments. It captures +and returns stdout, stderr and exit code. The format of the returned map is: + + { + "return-value": , + "stdout": "", + "stderr": "" + } + +If the command cannot be executed or no pipes for stdout or stderr can be +created the first return value is nil and the second return value is the error. +NOTE: Since stdout and stderr are captured, they cannot be seen during the +command execution. +*/ +func RunCommand(cmdArgs []string, runDir string) (map[string]interface{}, error) { + if len(cmdArgs) == 0 { + return nil, ErrEmptyCommandArgs + } + + cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) 
+
+	if runDir != "" {
+		cmd.Dir = runDir
+	}
+
+	stderrPipe, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, err
+	}
+	stdoutPipe, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	if err := cmd.Start(); err != nil {
+		return nil, err
+	}
+
+	// TODO: duplicate stdout, stderr
+	stdout, _ := io.ReadAll(stdoutPipe)
+	stderr, _ := io.ReadAll(stderrPipe)
+
+	retVal := waitErrToExitCode(cmd.Wait())
+
+	return map[string]interface{}{
+		"return-value": float64(retVal),
+		"stdout":       string(stdout),
+		"stderr":       string(stderr),
+	}, nil
+}
+
+/*
+InTotoRun executes commands, e.g. for software supply chain steps or
+inspections of an in-toto layout, and creates and returns corresponding link
+metadata. Link metadata contains recorded products at the passed productPaths
+and materials at the passed materialPaths. The returned link is wrapped in a
+Metablock object. If command execution or artifact recording fails, the first
+return value is nil and the second return value is the error.
+*/
+func InTotoRun(name string, runDir string, materialPaths []string, productPaths []string, cmdArgs []string, key Key, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) {
+	materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs)
+	if err != nil {
+		return nil, err
+	}
+
+	// make sure that we only run RunCommand if cmdArgs is not nil or empty
+	byProducts := map[string]interface{}{}
+	if len(cmdArgs) != 0 {
+		byProducts, err = RunCommand(cmdArgs, runDir)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs)
+	if err != nil {
+		return nil, err
+	}
+
+	link := Link{
+		Type:        "link",
+		Name:        name,
+		Materials:   materials,
+		Products:    products,
+		ByProducts:  byProducts,
+		Command:     cmdArgs,
+		Environment: map[string]interface{}{},
+	}
+
+	if useDSSE {
+		env := &Envelope{}
+		if err := env.SetPayload(link); err != nil {
+			return nil, err
+		}
+
+		if !reflect.ValueOf(key).IsZero() {
+			if err := env.Sign(key); err != nil {
+				return nil, err
+			}
+		}
+
+		return env, nil
+	}
+
+	linkMb := &Metablock{Signed: link, Signatures: []Signature{}}
+	if !reflect.ValueOf(key).IsZero() {
+		if err := linkMb.Sign(key); err != nil {
+			return nil, err
+		}
+	}
+
+	return linkMb, nil
+}
+
+/*
+InTotoRecordStart begins the creation of a link metablock file in two steps,
+in order to provide evidence for supply chain steps that cannot be carried out
+by a single command. InTotoRecordStart collects the hashes of the materials
+before any commands are run, signs the unfinished link, and returns the link.
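+
+A minimal usage sketch (the step name, paths and key below are placeholders):
+
+	prelim, err := InTotoRecordStart("build", []string{"src/"}, key,
+		[]string{"sha256"}, nil, nil, false, false, false)
+	// ... run the build commands ...
+	final, err := InTotoRecordStop(prelim, []string{"out/"}, key,
+		[]string{"sha256"}, nil, nil, false, false, false)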
+*/
+func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) {
+	materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs)
+	if err != nil {
+		return nil, err
+	}
+
+	link := Link{
+		Type:        "link",
+		Name:        name,
+		Materials:   materials,
+		Products:    map[string]interface{}{},
+		ByProducts:  map[string]interface{}{},
+		Command:     []string{},
+		Environment: map[string]interface{}{},
+	}
+
+	if useDSSE {
+		env := &Envelope{}
+		if err := env.SetPayload(link); err != nil {
+			return nil, err
+		}
+
+		if !reflect.ValueOf(key).IsZero() {
+			if err := env.Sign(key); err != nil {
+				return nil, err
+			}
+		}
+
+		return env, nil
+	}
+
+	linkMb := &Metablock{Signed: link, Signatures: []Signature{}}
+	if !reflect.ValueOf(key).IsZero() {
+		if err := linkMb.Sign(key); err != nil {
+			return nil, err
+		}
+	}
+
+	return linkMb, nil
+}
+
+/*
+InTotoRecordStop ends the creation of a metadata link file created by
+InTotoRecordStart. InTotoRecordStop takes in a signed unfinished link metablock
+created by InTotoRecordStart and records the hashes of any products created by
+commands run between InTotoRecordStart and InTotoRecordStop. The resulting
+finished link metablock is then signed by the provided key and returned.
+*/
+func InTotoRecordStop(prelimLinkEnv Metadata, productPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) {
+	if err := prelimLinkEnv.VerifySignature(key); err != nil {
+		return nil, err
+	}
+
+	link, ok := prelimLinkEnv.GetPayload().(Link)
+	if !ok {
+		return nil, errors.New("invalid metadata block")
+	}
+
+	products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs)
+	if err != nil {
+		return nil, err
+	}
+
+	link.Products = products
+
+	if useDSSE {
+		env := &Envelope{}
+		if err := env.SetPayload(link); err != nil {
+			return nil, err
+		}
+
+		if !reflect.ValueOf(key).IsZero() {
+			if err := env.Sign(key); err != nil {
+				return nil, err
+			}
+		}
+
+		return env, nil
+	}
+
+	linkMb := &Metablock{Signed: link, Signatures: []Signature{}}
+	if !reflect.ValueOf(key).IsZero() {
+		if err := linkMb.Sign(key); err != nil {
+			return linkMb, err
+		}
+	}
+
+	return linkMb, nil
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go
new file mode 100644
index 00000000000..a45a4546346
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go
@@ -0,0 +1,16 @@
+package common
+
+// DigestSet contains a set of digests. It is represented as a map from
+// algorithm name to lowercase hex-encoded value.
+type DigestSet map[string]string
+
+// ProvenanceBuilder identifies the entity that executed the build steps.
+type ProvenanceBuilder struct {
+	ID string `json:"id"`
+}
+
+// ProvenanceMaterial defines the materials used to build an artifact.
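+// For example (the URI and digest below are hypothetical values):
+//
+//	ProvenanceMaterial{
+//		URI:    "git+https://example.com/repo@refs/heads/main",
+//		Digest: DigestSet{"sha1": "84373d2..."},
+//	}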
+type ProvenanceMaterial struct {
+	URI    string    `json:"uri,omitempty"`
+	Digest DigestSet `json:"digest,omitempty"`
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go
new file mode 100644
index 00000000000..5978e9229d9
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go
@@ -0,0 +1,50 @@
+package v01
+
+import (
+	"time"
+
+	"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+)
+
+const (
+	// PredicateSLSAProvenance represents a build provenance for an artifact.
+	PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.1"
+)
+
+// ProvenancePredicate is the provenance predicate definition.
+type ProvenancePredicate struct {
+	Builder   common.ProvenanceBuilder   `json:"builder"`
+	Recipe    ProvenanceRecipe           `json:"recipe"`
+	Metadata  *ProvenanceMetadata        `json:"metadata,omitempty"`
+	Materials []common.ProvenanceMaterial `json:"materials,omitempty"`
+}
+
+// ProvenanceRecipe describes the actions performed by the builder.
+type ProvenanceRecipe struct {
+	Type string `json:"type"`
+	// DefinedInMaterial can be sent as the null pointer to indicate that
+	// the value is not present.
+	DefinedInMaterial *int        `json:"definedInMaterial,omitempty"`
+	EntryPoint        string      `json:"entryPoint"`
+	Arguments         interface{} `json:"arguments,omitempty"`
+	Environment       interface{} `json:"environment,omitempty"`
+}
+
+// ProvenanceMetadata contains metadata for the built artifact.
+type ProvenanceMetadata struct {
+	// Use pointer to make sure that the absence of a time is not
+	// encoded as the Epoch time.
+	BuildStartedOn  *time.Time         `json:"buildStartedOn,omitempty"`
+	BuildFinishedOn *time.Time         `json:"buildFinishedOn,omitempty"`
+	Completeness    ProvenanceComplete `json:"completeness"`
+	Reproducible    bool               `json:"reproducible"`
+}
+
+// ProvenanceComplete indicates whether the claims in build/recipe are complete.
+// For in-depth information refer to the specification:
+// https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md
+type ProvenanceComplete struct {
+	Arguments   bool `json:"arguments"`
+	Environment bool `json:"environment"`
+	Materials   bool `json:"materials"`
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go
new file mode 100644
index 00000000000..40416e29a85
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go
@@ -0,0 +1,144 @@
+package v02
+
+import (
+	"time"
+
+	"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+)
+
+const (
+	// PredicateSLSAProvenance represents a build provenance for an artifact.
+	PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.2"
+)
+
+// These are type aliases to the common package, kept to avoid
+// backwards-incompatible changes.
+type (
+	DigestSet          = common.DigestSet
+	ProvenanceBuilder  = common.ProvenanceBuilder
+	ProvenanceMaterial = common.ProvenanceMaterial
+)
+
+// ProvenancePredicate is the provenance predicate definition.
+type ProvenancePredicate struct {
+	// Builder identifies the entity that executed the invocation, which is trusted to have
+	// correctly performed the operation and populated this provenance.
+	//
+	// The identity MUST reflect the trust base that consumers care about. How detailed to be is a
+	// judgement call.
For example, GitHub Actions supports both GitHub-hosted runners and
+	// self-hosted runners. The GitHub-hosted runner might be a single identity because it’s all
+	// GitHub from the consumer’s perspective. Meanwhile, each self-hosted runner might have its
+	// own identity because not all runners are trusted by all consumers.
+	Builder common.ProvenanceBuilder `json:"builder"`
+
+	// BuildType is a URI indicating what type of build was performed. It determines the meaning of
+	// [Invocation], [BuildConfig] and [Materials].
+	BuildType string `json:"buildType"`
+
+	// Invocation identifies the event that kicked off the build. When combined with materials,
+	// this SHOULD fully describe the build, such that re-running this invocation results in
+	// bit-for-bit identical output (if the build is reproducible).
+	//
+	// MAY be unset/null if unknown, but this is DISCOURAGED.
+	Invocation ProvenanceInvocation `json:"invocation,omitempty"`
+
+	// BuildConfig lists the steps in the build. If [ProvenanceInvocation.ConfigSource] is not
+	// available, BuildConfig can be used to verify information about the build.
+	//
+	// This is an arbitrary JSON object with a schema defined by [BuildType].
+	BuildConfig interface{} `json:"buildConfig,omitempty"`
+
+	// Metadata contains other properties of the build.
+	Metadata *ProvenanceMetadata `json:"metadata,omitempty"`
+
+	// Materials is the collection of artifacts that influenced the build including sources,
+	// dependencies, build tools, base images, and so on.
+	//
+	// This is considered to be incomplete unless metadata.completeness.materials is true.
+	Materials []common.ProvenanceMaterial `json:"materials,omitempty"`
+}
+
+// ProvenanceInvocation identifies the event that kicked off the build.
+type ProvenanceInvocation struct {
+	// ConfigSource describes where the config file that kicked off the build came from. This is
+	// effectively a pointer to the source where [ProvenancePredicate.BuildConfig] came from.
+	ConfigSource ConfigSource `json:"configSource,omitempty"`
+
+	// Parameters is a collection of all external inputs that influenced the build on top of
+	// ConfigSource. For example, if the invocation type were “make”, then this might be the
+	// flags passed to make aside from the target, which is captured in [ConfigSource.EntryPoint].
+	//
+	// Consumers SHOULD accept only “safe” Parameters. The simplest and safest way to
+	// achieve this is to disallow any parameters altogether.
+	//
+	// This is an arbitrary JSON object with a schema defined by buildType.
+	Parameters interface{} `json:"parameters,omitempty"`
+
+	// Environment contains any other builder-controlled inputs necessary for correctly evaluating
+	// the build. Usually only needed for reproducing the build but not evaluated as part of
+	// policy.
+	//
+	// This SHOULD be minimized to only include things that are part of the public API, that cannot
+	// be recomputed from other values in the provenance, and that actually affect the evaluation
+	// of the build. For example, this might include variables that are referenced in the workflow
+	// definition, but it SHOULD NOT include a dump of all environment variables or include things
+	// like the hostname (assuming hostname is not part of the public API).
+	Environment interface{} `json:"environment,omitempty"`
+}
+
+type ConfigSource struct {
+	// URI indicating the identity of the source of the config.
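+	// For example, "git+https://example.com/repo" (a hypothetical value).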
+	URI string `json:"uri,omitempty"`
+	// Digest is a collection of cryptographic digests for the contents of the artifact specified
+	// by [URI].
+	Digest common.DigestSet `json:"digest,omitempty"`
+	// EntryPoint identifies the entry point into the build. This is often a path to a
+	// configuration file and/or a target label within that file. The syntax and meaning are
+	// defined by buildType. For example, if the buildType were “make”, then this would reference
+	// the directory in which to run make as well as which target to use.
+	//
+	// Consumers SHOULD accept only specific [ProvenanceInvocation.EntryPoint] values. For example,
+	// a policy might only allow the "release" entry point but not the "debug" entry point.
+	// MAY be omitted if the buildType specifies a default value.
+	EntryPoint string `json:"entryPoint,omitempty"`
+}
+
+// ProvenanceMetadata contains metadata for the built artifact.
+type ProvenanceMetadata struct {
+	// BuildInvocationID identifies this particular build invocation, which can be useful for
+	// finding associated logs or other ad-hoc analysis. The exact meaning and format is defined
+	// by [common.ProvenanceBuilder.ID]; by default it is treated as opaque and case-sensitive.
+	// The value SHOULD be globally unique.
+	BuildInvocationID string `json:"buildInvocationID,omitempty"`
+
+	// BuildStartedOn is the timestamp of when the build started.
+	//
+	// Use pointer to make sure that the absence of a time is not
+	// encoded as the Epoch time.
+	BuildStartedOn *time.Time `json:"buildStartedOn,omitempty"`
+	// BuildFinishedOn is the timestamp of when the build completed.
+	BuildFinishedOn *time.Time `json:"buildFinishedOn,omitempty"`
+
+	// Completeness indicates that the builder claims certain fields in this message to be
+	// complete.
+	Completeness ProvenanceComplete `json:"completeness"`
+
+	// Reproducible if true, means the builder claims that running invocation on materials will
+	// produce bit-for-bit identical output.
+	Reproducible bool `json:"reproducible"`
+}
+
+// ProvenanceComplete indicates whether the claims in build/recipe are complete.
+// For in-depth information refer to the specification:
+// https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md
+type ProvenanceComplete struct {
+	// Parameters if true, means the builder claims that [ProvenanceInvocation.Parameters] is
+	// complete, meaning that all external inputs are properly captured in
+	// ProvenanceInvocation.Parameters.
+	Parameters bool `json:"parameters"`
+	// Environment if true, means the builder claims that [ProvenanceInvocation.Environment] is
+	// complete.
+	Environment bool `json:"environment"`
+	// Materials if true, means the builder claims that materials is complete, usually through some
+	// controls to prevent network access. Sometimes called “hermetic”.
+	Materials bool `json:"materials"`
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go
new file mode 100644
index 00000000000..e849731dceb
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go
@@ -0,0 +1,151 @@
+package v1
+
+import (
+	"time"
+
+	"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+)
+
+const (
+	// PredicateSLSAProvenance represents a build provenance for an artifact.
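+	// Consumers typically use this URI as the predicateType of the
+	// enclosing in-toto statement.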
+	PredicateSLSAProvenance = "https://slsa.dev/provenance/v1"
+)
+
+// ProvenancePredicate is the provenance predicate definition.
+type ProvenancePredicate struct {
+	// The BuildDefinition describes all of the inputs to the build. The
+	// accuracy and completeness are implied by runDetails.builder.id.
+	//
+	// It SHOULD contain all the information necessary and sufficient to
+	// initialize the build and begin execution.
+	BuildDefinition ProvenanceBuildDefinition `json:"buildDefinition"`
+
+	// Details specific to this particular execution of the build.
+	RunDetails ProvenanceRunDetails `json:"runDetails"`
+}
+
+// ProvenanceBuildDefinition describes the inputs to the build.
+type ProvenanceBuildDefinition struct {
+	// Identifies the template for how to perform the build and interpret the
+	// parameters and dependencies.
+	//
+	// The URI SHOULD resolve to a human-readable specification that includes:
+	// overall description of the build type; schema for externalParameters and
+	// systemParameters; unambiguous instructions for how to initiate the build
+	// given this BuildDefinition, and a complete example.
+	BuildType string `json:"buildType"`
+
+	// The parameters that are under external control, such as those set by a
+	// user or tenant of the build system. They MUST be complete at SLSA Build
+	// L3, meaning that there is no additional mechanism for an external
+	// party to influence the build. (At lower SLSA Build levels, the
+	// completeness MAY be best effort.)
+	//
+	// The build system SHOULD be designed to minimize the size and complexity
+	// of externalParameters, in order to reduce fragility and ease
+	// verification. Consumers SHOULD have an expectation of what “good” looks
+	// like; the more information that they need to check, the harder that task
+	// becomes.
+	ExternalParameters interface{} `json:"externalParameters"`
+
+	// The parameters that are under the control of the entity represented by
+	// builder.id. The primary intention of this field is for debugging,
+	// incident response, and vulnerability management. The values here MAY be
+	// necessary for reproducing the build. There is no need to verify these
+	// parameters because the build system is already trusted, and in many cases
+	// it is not practical to do so.
+	InternalParameters interface{} `json:"internalParameters,omitempty"`
+
+	// Unordered collection of artifacts needed at build time. Completeness is
+	// best effort, at least through SLSA Build L3. For example, if the build
+	// script fetches and executes “example.com/foo.sh”, which in turn fetches
+	// “example.com/bar.tar.gz”, then both “foo.sh” and “bar.tar.gz” SHOULD be
+	// listed here.
+	ResolvedDependencies []ResourceDescriptor `json:"resolvedDependencies,omitempty"`
+}
+
+// ProvenanceRunDetails includes details specific to a particular execution of a
+// build.
+type ProvenanceRunDetails struct {
+	// Identifies the entity that executed the invocation, which is trusted to
+	// have correctly performed the operation and populated this provenance.
+	//
+	// This field is REQUIRED for SLSA Build 1 unless id is implicit from the
+	// attestation envelope.
+	Builder Builder `json:"builder"`
+
+	// Metadata about this particular execution of the build.
+	BuildMetadata BuildMetadata `json:"metadata,omitempty"`
+
+	// Additional artifacts generated during the build that are not considered
+	// the “output” of the build but that might be needed during debugging or
+	// incident response.
For example, this might reference logs generated
+	// during the build and/or a digest of the fully evaluated build
+	// configuration.
+	//
+	// In most cases, this SHOULD NOT contain all intermediate files generated
+	// during the build. Instead, this SHOULD only contain files that are
+	// likely to be useful later and that cannot be easily reproduced.
+	Byproducts []ResourceDescriptor `json:"byproducts,omitempty"`
+}
+
+// ResourceDescriptor describes a particular software artifact or resource
+// (mutable or immutable).
+// See https://github.com/in-toto/attestation/blob/main/spec/v1.0/resource_descriptor.md
+type ResourceDescriptor struct {
+	// A URI used to identify the resource or artifact globally. This field is
+	// REQUIRED unless either digest or content is set.
+	URI string `json:"uri,omitempty"`
+
+	// A set of cryptographic digests of the contents of the resource or
+	// artifact. This field is REQUIRED unless either uri or content is set.
+	Digest common.DigestSet `json:"digest,omitempty"`
+
+	// Machine-readable identifier for distinguishing between descriptors.
+	Name string `json:"name,omitempty"`
+
+	// The location of the described resource or artifact, if different from the
+	// uri.
+	DownloadLocation string `json:"downloadLocation,omitempty"`
+
+	// The MIME Type (i.e., media type) of the described resource or artifact.
+	MediaType string `json:"mediaType,omitempty"`
+
+	// The contents of the resource or artifact. This field is REQUIRED unless
+	// either uri or digest is set.
+	Content []byte `json:"content,omitempty"`
+
+	// This field MAY be used to provide additional information or metadata
+	// about the resource or artifact that may be useful to the consumer when
+	// evaluating the attestation against a policy.
+	Annotations map[string]interface{} `json:"annotations,omitempty"`
+}
+
+// Builder represents the transitive closure of all the entities that are, by
+// necessity, trusted to faithfully run the build and record the provenance.
+type Builder struct {
+	// URI indicating the transitive closure of the trusted builder.
+	ID string `json:"id"`
+
+	// Version numbers of components of the builder.
+	Version map[string]string `json:"version,omitempty"`
+
+	// Dependencies used by the orchestrator that are not run within the
+	// workload and that do not affect the build, but might affect the
+	// provenance generation or security guarantees.
+	BuilderDependencies []ResourceDescriptor `json:"builderDependencies,omitempty"`
+}
+
+type BuildMetadata struct {
+	// Identifies this particular build invocation, which can be useful for
+	// finding associated logs or other ad-hoc analysis. The exact meaning and
+	// format is defined by builder.id; by default it is treated as opaque and
+	// case-sensitive. The value SHOULD be globally unique.
+	InvocationID string `json:"invocationID,omitempty"`
+
+	// The timestamp of when the build started.
+	StartedOn *time.Time `json:"startedOn,omitempty"`
+
+	// The timestamp of when the build completed.
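+	// As in the earlier predicate versions, a nil pointer encodes an
+	// unknown timestamp rather than the zero time.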
+	FinishedOn *time.Time `json:"finishedOn,omitempty"`
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go
new file mode 100644
index 00000000000..5c36dede13d
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go
@@ -0,0 +1,190 @@
+package in_toto
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+var ErrUnknownMetadataType = errors.New("unknown metadata type encountered: not link or layout")
+
+/*
+Set represents a data structure for set operations. See `NewSet` for how to
+create a Set, and available Set receivers for useful set operations.
+
+Under the hood Set aliases map[string]struct{}, where the map keys are the set
+elements and the map values are a memory-efficient way of storing the keys.
+*/
+type Set map[string]struct{}
+
+/*
+NewSet creates a new Set, assigns it the optionally passed variadic string
+elements, and returns it.
+*/
+func NewSet(elems ...string) Set {
+	var s Set = make(map[string]struct{})
+	for _, elem := range elems {
+		s.Add(elem)
+	}
+	return s
+}
+
+/*
+Has returns true if the passed string is a member of the set on which it was
+called and false otherwise.
+*/
+func (s Set) Has(elem string) bool {
+	_, ok := s[elem]
+	return ok
+}
+
+/*
+Add adds the passed string to the set on which it was called, if the string is
+not a member of the set.
+*/
+func (s Set) Add(elem string) {
+	s[elem] = struct{}{}
+}
+
+/*
+Remove removes the passed string from the set on which it was called, if the
+string is a member of the set.
+*/
+func (s Set) Remove(elem string) {
+	delete(s, elem)
+}
+
+/*
+Intersection creates and returns a new Set with the elements of the set on
+which it was called that are also in the passed set.
+*/
+func (s Set) Intersection(s2 Set) Set {
+	res := NewSet()
+	for elem := range s {
+		if !s2.Has(elem) {
+			continue
+		}
+		res.Add(elem)
+	}
+	return res
+}
+
+/*
+Difference creates and returns a new Set with the elements of the set on
+which it was called that are not in the passed set.
+*/
+func (s Set) Difference(s2 Set) Set {
+	res := NewSet()
+	for elem := range s {
+		if s2.Has(elem) {
+			continue
+		}
+		res.Add(elem)
+	}
+	return res
+}
+
+/*
+Filter creates and returns a new Set with the elements of the set on which it
+was called that match the passed pattern. A matching error is treated as a
+non-match, and a warning is printed.
+*/
+func (s Set) Filter(pattern string) Set {
+	res := NewSet()
+	for elem := range s {
+		matched, err := match(pattern, elem)
+		if err != nil {
+			fmt.Printf("WARNING: %s, pattern was '%s'\n", err, pattern)
+			continue
+		}
+		if !matched {
+			continue
+		}
+		res.Add(elem)
+	}
+	return res
+}
+
+/*
+Slice creates and returns an unordered string slice with the elements of the
+set on which it was called.
+*/
+func (s Set) Slice() []string {
+	res := make([]string, 0, len(s))
+	for elem := range s {
+		res = append(res, elem)
+	}
+	return res
+}
+
+/*
+InterfaceKeyStrings returns the string keys of the passed interface{} map in an
+unordered string slice.
+*/
+func InterfaceKeyStrings(m map[string]interface{}) []string {
+	res := make([]string, len(m))
+	i := 0
+	for k := range m {
+		res[i] = k
+		i++
+	}
+	return res
+}
+
+/*
+IsSubSet checks if the parameter subset is a
+subset of the superset s.
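+
+A short sketch of the expected behavior:
+
+	s := NewSet("a", "b", "c")
+	s.IsSubSet(NewSet("a", "b")) // true
+	s.IsSubSet(NewSet("a", "d")) // false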
+*/
+func (s Set) IsSubSet(subset Set) bool {
+	if len(subset) > len(s) {
+		return false
+	}
+	for key := range subset {
+		if !s.Has(key) {
+			return false
+		}
+	}
+	return true
+}
+
+func loadPayload(payloadBytes []byte) (any, error) {
+	var payload map[string]any
+	if err := json.Unmarshal(payloadBytes, &payload); err != nil {
+		return nil, fmt.Errorf("error decoding payload: %w", err)
+	}
+
+	if payload["_type"] == "link" {
+		var link Link
+		if err := checkRequiredJSONFields(payload, reflect.TypeOf(link)); err != nil {
+			return nil, fmt.Errorf("error decoding payload: %w", err)
+		}
+
+		decoder := json.NewDecoder(strings.NewReader(string(payloadBytes)))
+		decoder.DisallowUnknownFields()
+		if err := decoder.Decode(&link); err != nil {
+			return nil, fmt.Errorf("error decoding payload: %w", err)
+		}
+
+		return link, nil
+	} else if payload["_type"] == "layout" {
+		var layout Layout
+		if err := checkRequiredJSONFields(payload, reflect.TypeOf(layout)); err != nil {
+			return nil, fmt.Errorf("error decoding payload: %w", err)
+		}
+
+		decoder := json.NewDecoder(strings.NewReader(string(payloadBytes)))
+		decoder.DisallowUnknownFields()
+		if err := decoder.Decode(&layout); err != nil {
+			return nil, fmt.Errorf("error decoding payload: %w", err)
+		}
+
+		return layout, nil
+	}
+
+	return nil, ErrUnknownMetadataType
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go
new file mode 100644
index 00000000000..f555f79a528
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go
@@ -0,0 +1,14 @@
+//go:build linux || darwin || !windows
+// +build linux darwin !windows
+
+package in_toto
+
+import "golang.org/x/sys/unix"
+
+func isWritable(path string) error {
+	err := unix.Access(path, unix.W_OK)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go
new file mode 100644
index 00000000000..8552f0345d0
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go
@@ -0,0 +1,25 @@
+package in_toto
+
+import (
+	"errors"
+	"os"
+)
+
+func isWritable(path string) error {
+	// get fileInfo
+	info, err := os.Stat(path)
+	if err != nil {
+		return err
+	}
+
+	// check if path is a directory
+	if !info.IsDir() {
+		return errors.New("not a directory")
+	}
+
+	// Check if the owner write bit (0200) is set in the file permissions
+	if info.Mode().Perm()&(1<<(uint(7))) == 0 {
+		return errors.New("not writable")
+	}
+	return nil
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go
new file mode 100644
index 00000000000..2564bd47eb2
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go
@@ -0,0 +1,1108 @@
+/*
+Package in_toto implements types and routines to verify a software supply chain
+according to the in-toto specification.
+See https://github.com/in-toto/docs/blob/master/in-toto-spec.md
+*/
+package in_toto
+
+import (
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"strings"
+	"time"
+)
+
+// ErrInspectionRunDirIsSymlink is returned if the runDir is a symlink
+var ErrInspectionRunDirIsSymlink = errors.New("runDir is a symlink.
This is a security risk")
+
+var ErrNotLayout = errors.New("verification workflow passed a non-layout")
+
+/*
+RunInspections iteratively executes the command in the Run field of all
+inspections of the passed layout, creating unsigned link metadata that records
+all files found in the current working directory as materials (before command
+execution) and products (after command execution). A map with inspection names
+as keys and Metablocks containing the generated link metadata as values is
+returned. The format is:
+
+	{
+		"<inspection name>": Metablock,
+		"<inspection name>": Metablock,
+		...
+	}
+
+If executing the inspection command fails, or if the executed command has a
+non-zero exit code, the first return value is an empty Metablock map and the
+second return value is the error.
+*/
+func RunInspections(layout Layout, runDir string, lineNormalization bool, useDSSE bool) (map[string]Metadata, error) {
+	inspectionMetadata := make(map[string]Metadata)
+
+	for _, inspection := range layout.Inspect {
+
+		paths := []string{"."}
+		if runDir != "" {
+			paths = []string{runDir}
+		}
+
+		linkEnv, err := InTotoRun(inspection.Name, runDir, paths, paths,
+			inspection.Run, Key{}, []string{"sha256"}, nil, nil, lineNormalization, false, useDSSE)
+
+		if err != nil {
+			return nil, err
+		}
+
+		retVal := linkEnv.GetPayload().(Link).ByProducts["return-value"]
+		if retVal != float64(0) {
+			return nil, fmt.Errorf("inspection command '%s' of inspection '%s'"+
+				" returned a non-zero value: %v", inspection.Run, inspection.Name,
+				retVal)
+		}
+
+		// Dump inspection link to cwd using the short link name format
+		linkName := fmt.Sprintf(LinkNameFormatShort, inspection.Name)
+		if err := linkEnv.Dump(linkName); err != nil {
+			fmt.Printf("JSON serialization or writing failed: %s", err)
+		}
+
+		inspectionMetadata[inspection.Name] = linkEnv
+	}
+	return inspectionMetadata, nil
+}
+
+// verifyMatchRule is a helper function to process artifact rules of
+// type MATCH. See VerifyArtifacts for more details.
+func verifyMatchRule(ruleData map[string]string,
+	srcArtifacts map[string]interface{}, srcArtifactQueue Set,
+	itemsMetadata map[string]Metadata) Set {
+	consumed := NewSet()
+	// Get destination link metadata
+	dstLinkEnv, exists := itemsMetadata[ruleData["dstName"]]
+	if !exists {
+		// Destination link does not exist, rule can't consume any
+		// artifacts
+		return consumed
+	}
+
+	// Get artifacts from destination link metadata
+	var dstArtifacts map[string]interface{}
+	switch ruleData["dstType"] {
+	case "materials":
+		dstArtifacts = dstLinkEnv.GetPayload().(Link).Materials
+	case "products":
+		dstArtifacts = dstLinkEnv.GetPayload().(Link).Products
+	}
+
+	// cleanup paths in pattern and artifact maps
+	if ruleData["pattern"] != "" {
+		ruleData["pattern"] = path.Clean(ruleData["pattern"])
+	}
+	for k := range srcArtifacts {
+		if path.Clean(k) != k {
+			srcArtifacts[path.Clean(k)] = srcArtifacts[k]
+			delete(srcArtifacts, k)
+		}
+	}
+	for k := range dstArtifacts {
+		if path.Clean(k) != k {
+			dstArtifacts[path.Clean(k)] = dstArtifacts[k]
+			delete(dstArtifacts, k)
+		}
+	}
+
+	// Normalize optional source and destination prefixes, i.e.
if
+	// there is a prefix, then add a trailing slash if not there yet
+	for _, prefix := range []string{"srcPrefix", "dstPrefix"} {
+		if ruleData[prefix] != "" {
+			ruleData[prefix] = path.Clean(ruleData[prefix])
+			if !strings.HasSuffix(ruleData[prefix], "/") {
+				ruleData[prefix] += "/"
+			}
+		}
+	}
+	// Iterate over queue and mark consumed artifacts
+	for srcPath := range srcArtifactQueue {
+		// Remove optional source prefix from source artifact path
+		// Noop if prefix is empty, or artifact does not have it
+		srcBasePath := strings.TrimPrefix(srcPath, ruleData["srcPrefix"])
+
+		// Ignore artifacts not matched by rule pattern
+		matched, err := match(ruleData["pattern"], srcBasePath)
+		if err != nil || !matched {
+			continue
+		}
+
+		// Construct corresponding destination artifact path, i.e.
+		// an optional destination prefix plus the source base path
+		dstPath := path.Clean(path.Join(ruleData["dstPrefix"], srcBasePath))
+
+		// Try to find the corresponding destination artifact
+		dstArtifact, exists := dstArtifacts[dstPath]
+		// Ignore artifacts without corresponding destination artifact
+		if !exists {
+			continue
+		}
+
+		// Ignore artifact pairs with no matching hashes
+		if !reflect.DeepEqual(srcArtifacts[srcPath], dstArtifact) {
+			continue
+		}
+
+		// Only if a source and destination artifact pair was found and
+		// their hashes are equal, will we mark the source artifact as
+		// successfully consumed, i.e. it will be removed from the queue
+		consumed.Add(srcPath)
+	}
+	return consumed
+}
+
+/*
+VerifyArtifacts iteratively applies the material and product rules of the
+passed items (step or inspection) to enforce and authorize artifacts (materials
+or products) reported by the corresponding link and to guarantee that
+artifacts are linked together across links. In the beginning all artifacts are
+placed in a queue according to their type. If an artifact gets consumed by a
+rule it is removed from the queue. An artifact can only be consumed once in
+the course of processing the set of rules in ExpectedMaterials or
+ExpectedProducts.
+
+Rules of type MATCH, ALLOW, CREATE, DELETE, MODIFY and DISALLOW are supported.
+
+All rules except for DISALLOW consume queued artifacts on success, and
+leave the queue unchanged on failure. Hence, it is left to a terminal
+DISALLOW rule to fail overall verification, if artifacts are left in the queue
+that should have been consumed by preceding rules.
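+
+For example, a step's expected products might be declared as (the path is a
+hypothetical value):
+
+	[][]string{
+		{"CREATE", "dist/app.tar.gz"},
+		{"DISALLOW", "*"},
+	}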
+*/
+func VerifyArtifacts(items []interface{},
+	itemsMetadata map[string]Metadata) error {
+	// Verify artifact rules for each item in the layout
+	for _, itemI := range items {
+		// The layout item (interface) must be a Step or an Inspection; we are
+		// only interested in the name and the expected materials and products
+		var itemName string
+		var expectedMaterials [][]string
+		var expectedProducts [][]string
+
+		switch item := itemI.(type) {
+		case Step:
+			itemName = item.Name
+			expectedMaterials = item.ExpectedMaterials
+			expectedProducts = item.ExpectedProducts
+
+		case Inspection:
+			itemName = item.Name
+			expectedMaterials = item.ExpectedMaterials
+			expectedProducts = item.ExpectedProducts
+
+		default: // Something wrong
+			return fmt.Errorf("VerifyArtifacts received an item of invalid type,"+
+				" elements of passed slice 'items' must be one of 'Step' or"+
+				" 'Inspection', got: '%s'", reflect.TypeOf(item))
+		}
+
+		// Use the item's name to extract the corresponding link
+		srcLinkEnv, exists := itemsMetadata[itemName]
+		if !exists {
+			return fmt.Errorf("VerifyArtifacts could not find metadata"+
+				" for item '%s', got: '%s'", itemName, itemsMetadata)
+		}
+
+		// Create shortcuts to materials and products (including hashes) reported
+		// by the item's link, required to verify "match" rules
+		materials := srcLinkEnv.GetPayload().(Link).Materials
+		products := srcLinkEnv.GetPayload().(Link).Products
+
+		// All other rules only require the material or product paths (without
+		// hashes). We extract them from the corresponding maps and store them as
+		// sets for convenience in further processing
+		materialPaths := NewSet()
+		for _, p := range InterfaceKeyStrings(materials) {
+			materialPaths.Add(path.Clean(p))
+		}
+		productPaths := NewSet()
+		for _, p := range InterfaceKeyStrings(products) {
+			productPaths.Add(path.Clean(p))
+		}
+
+		// For `create`, `delete` and `modify` rules we prepare sets of artifacts
+		// (without hashes) that were created, deleted or modified in the current
+		// step or inspection
+		created := productPaths.Difference(materialPaths)
+		deleted := materialPaths.Difference(productPaths)
+		remained := materialPaths.Intersection(productPaths)
+		modified := NewSet()
+		for name := range remained {
+			if !reflect.DeepEqual(materials[name], products[name]) {
+				modified.Add(name)
+			}
+		}
+
+		// For each item we have to run rule verification, once per artifact type.
+		// Here we prepare the corresponding data for each round.
+		verificationDataList := []map[string]interface{}{
+			{
+				"srcType":       "materials",
+				"rules":         expectedMaterials,
+				"artifacts":     materials,
+				"artifactPaths": materialPaths,
+			},
+			{
+				"srcType":       "products",
+				"rules":         expectedProducts,
+				"artifacts":     products,
+				"artifactPaths": productPaths,
+			},
+		}
+		// TODO: Add logging library (see in-toto/in-toto-golang#4)
+		// fmt.Printf("Verifying %s '%s' ", reflect.TypeOf(itemI), itemName)
+
+		// Process all material rules using the corresponding materials and all
+		// product rules using the corresponding products
+		for _, verificationData := range verificationDataList {
+			// TODO: Add logging library (see in-toto/in-toto-golang#4)
+			// fmt.Printf("%s...\n", verificationData["srcType"])
+
+			rules := verificationData["rules"].([][]string)
+			artifacts := verificationData["artifacts"].(map[string]interface{})
+
+			// Use artifacts (without hashes) as base queue. Each rule only operates
+			// on artifacts in that queue. If a rule consumes an artifact (i.e.
can
+			// be applied successfully), the artifact is removed from the queue. By
+			// applying a DISALLOW rule eventually, verification may return an error,
+			// if the rule matches any artifacts in the queue that should have been
+			// consumed earlier.
+			queue := verificationData["artifactPaths"].(Set)
+
+			// TODO: Add logging library (see in-toto/in-toto-golang#4)
+			// fmt.Printf("Initial state\nMaterials: %s\nProducts: %s\nQueue: %s\n\n",
+			//	materialPaths.Slice(), productPaths.Slice(), queue.Slice())
+
+			// Verify rules sequentially
+			for _, rule := range rules {
+				// Parse rule and error out if it is malformed
+				// NOTE: the rule format should have been validated before
+				ruleData, err := UnpackRule(rule)
+				if err != nil {
+					return err
+				}
+
+				// Apply rule pattern to filter queued artifacts that are up for rule
+				// specific consumption
+				filtered := queue.Filter(path.Clean(ruleData["pattern"]))
+
+				var consumed Set
+				switch ruleData["type"] {
+				case "match":
+					// Note: here we need to perform more elaborate filtering
+					consumed = verifyMatchRule(ruleData, artifacts, queue, itemsMetadata)
+
+				case "allow":
+					// Consumes all filtered artifacts
+					consumed = filtered
+
+				case "create":
+					// Consumes filtered artifacts that were created
+					consumed = filtered.Intersection(created)
+
+				case "delete":
+					// Consumes filtered artifacts that were deleted
+					consumed = filtered.Intersection(deleted)
+
+				case "modify":
+					// Consumes filtered artifacts that were modified
+					consumed = filtered.Intersection(modified)
+
+				case "disallow":
+					// Does not consume but errors out if artifacts were filtered
+					if len(filtered) > 0 {
+						return fmt.Errorf("artifact verification failed for %s '%s',"+
+							" %s %s disallowed by rule %s",
+							reflect.TypeOf(itemI).Name(), itemName,
+							verificationData["srcType"], filtered.Slice(), rule)
+					}
+				case "require":
+					// REQUIRE is somewhat of a weird animal that does not use
+					// patterns but rather single filenames (for now).
+					if !queue.Has(ruleData["pattern"]) {
+						return fmt.Errorf("artifact verification failed for %s in REQUIRE '%s',"+
+							" because %s is not in %s", verificationData["srcType"],
+							ruleData["pattern"], ruleData["pattern"], queue.Slice())
+					}
+				}
+				// Update queue by removing consumed artifacts
+				queue = queue.Difference(consumed)
+				// TODO: Add logging library (see in-toto/in-toto-golang#4)
+				// fmt.Printf("Rule: %s\nQueue: %s\n\n", rule, queue.Slice())
+			}
+		}
+	}
+	return nil
+}
+
+/*
+ReduceStepsMetadata merges for each step of the passed Layout all the passed
+per-functionary links into a single link, asserting that the reported Materials
+and Products are equal across links for a given step. This function may be
+used at a time during the overall verification, where link thresholds have
+been verified and subsequent verification only needs one exemplary link per
+step. The function returns a map with one Metablock (link) per step:
+
+	{
+		"<step name>": Metablock,
+		"<step name>": Metablock,
+		...
+	}
+
+If links corresponding to the same step report different Materials or different
+Products, the first return value is an empty Metablock map and the second
+return value is the error.
+*/
+func ReduceStepsMetadata(layout Layout,
+	stepsMetadata map[string]map[string]Metadata) (map[string]Metadata,
+	error) {
+	stepsMetadataReduced := make(map[string]Metadata)
+
+	for _, step := range layout.Steps {
+		linksPerStep, ok := stepsMetadata[step.Name]
+		// We should never get here, layout verification must fail earlier
+		if !ok || len(linksPerStep) < 1 {
+			panic("Could not reduce metadata for step '" + step.Name +
+				"', no link metadata found.")
+		}
+
+		// Get the first link (could be any link) for the current step, which will
+		// serve as reference link for below comparisons
+		var referenceKeyID string
+		var referenceLinkEnv Metadata
+		for keyID, linkEnv := range linksPerStep {
+			referenceLinkEnv = linkEnv
+			referenceKeyID = keyID
+			break
+		}
+
+		// Only one link, nothing to reduce, take the reference link
+		if len(linksPerStep) == 1 {
+			stepsMetadataReduced[step.Name] = referenceLinkEnv
+
+			// Multiple links, reduce but first check
+		} else {
+			// Artifact maps must be equal for each type among all links
+			// TODO: What should we do if there are more links than the
+			// threshold requires, but not all of them are equal? Right now we would
+			// also error.
+			for keyID, linkEnv := range linksPerStep {
+				if !reflect.DeepEqual(linkEnv.GetPayload().(Link).Materials,
+					referenceLinkEnv.GetPayload().(Link).Materials) ||
+					!reflect.DeepEqual(linkEnv.GetPayload().(Link).Products,
+						referenceLinkEnv.GetPayload().(Link).Products) {
+					return nil, fmt.Errorf("link '%s' and '%s' have different"+
+						" artifacts",
+						fmt.Sprintf(LinkNameFormat, step.Name, referenceKeyID),
+						fmt.Sprintf(LinkNameFormat, step.Name, keyID))
+				}
+			}
+			// We haven't errored out, so we can reduce (i.e. take the reference link)
+			stepsMetadataReduced[step.Name] = referenceLinkEnv
+		}
+	}
+	return stepsMetadataReduced, nil
+}
+
+/*
+VerifyStepCommandAlignment (soft) verifies that for each step of the passed
+layout the command executed, as per the passed link, matches the expected
+command, as per the layout. Soft verification means that, in case a command
+does not align, a warning is issued.
+*/
+func VerifyStepCommandAlignment(layout Layout,
+	stepsMetadata map[string]map[string]Metadata) {
+	for _, step := range layout.Steps {
+		linksPerStep, ok := stepsMetadata[step.Name]
+		// We should never get here, layout verification must fail earlier
+		if !ok || len(linksPerStep) < 1 {
+			panic("Could not verify command alignment for step '" + step.Name +
+				"', no link metadata found.")
+		}
+
+		for signerKeyID, linkEnv := range linksPerStep {
+			expectedCommandS := strings.Join(step.ExpectedCommand, " ")
+			executedCommandS := strings.Join(linkEnv.GetPayload().(Link).Command, " ")
+
+			if expectedCommandS != executedCommandS {
+				linkName := fmt.Sprintf(LinkNameFormat, step.Name, signerKeyID)
+				fmt.Printf("WARNING: Expected command for step '%s' (%s) and command"+
+					" reported by '%s' (%s) differ.\n",
+					step.Name, expectedCommandS, linkName, executedCommandS)
+			}
+		}
+	}
+}
+
+/*
+LoadLayoutCertificates loads the root and intermediate CAs from the layout, if
+any are present. These will be used to check signatures that were used to sign
+links but are not configured in the PubKeys section of the step. No configured
+CAs means we don't want to allow this; the returned CertPools will be empty in
+this case.
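+
+A minimal call sketch (the extra PEM bytes are a placeholder):
+
+	roots, intermediates, err := LoadLayoutCertificates(layout, [][]byte{extraIntermediatePEM})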
+*/
+func LoadLayoutCertificates(layout Layout, intermediatePems [][]byte) (*x509.CertPool, *x509.CertPool, error) {
+	rootPool := x509.NewCertPool()
+	for _, certPem := range layout.RootCas {
+		ok := rootPool.AppendCertsFromPEM([]byte(certPem.KeyVal.Certificate))
+		if !ok {
+			return nil, nil, fmt.Errorf("failed to load root certificates for layout")
+		}
+	}
+
+	intermediatePool := x509.NewCertPool()
+	for _, intermediatePem := range layout.IntermediateCas {
+		ok := intermediatePool.AppendCertsFromPEM([]byte(intermediatePem.KeyVal.Certificate))
+		if !ok {
+			return nil, nil, fmt.Errorf("failed to load intermediate certificates for layout")
+		}
+	}
+
+	for _, intermediatePem := range intermediatePems {
+		ok := intermediatePool.AppendCertsFromPEM(intermediatePem)
+		if !ok {
+			return nil, nil, fmt.Errorf("failed to load provided intermediate certificates")
+		}
+	}
+
+	return rootPool, intermediatePool, nil
+}
+
+/*
+VerifyLinkSignatureThesholds verifies that for each step of the passed layout,
+there are at least Threshold links, validly signed by different authorized
+functionaries. The returned map of link metadata per step contains only
+links with valid signatures from distinct functionaries and has the format:
+
+	{
+		"<step name>": {
+			"<key id>": Metablock,
+			"<key id>": Metablock,
+			...
+		},
+		"<step name>": {
+			"<key id>": Metablock,
+			"<key id>": Metablock,
+			...
+		},
+		...
+	}
+
+If for any step of the layout there are not enough links available, the first
+return value is an empty map of Metablock maps and the second return value is
+the error.
+*/
+func VerifyLinkSignatureThesholds(layout Layout,
+	stepsMetadata map[string]map[string]Metadata, rootCertPool, intermediateCertPool *x509.CertPool) (
+	map[string]map[string]Metadata, error) {
+	// This will store links with a valid signature from an authorized functionary
+	// for all steps
+	stepsMetadataVerified := make(map[string]map[string]Metadata)
+
+	// Try to find enough (>= threshold) links each with a valid signature from
+	// distinct authorized functionaries for each step
+	for _, step := range layout.Steps {
+		var stepErr error
+
+		// This will store links with a valid signature from an authorized
+		// functionary for the given step
+		linksPerStepVerified := make(map[string]Metadata)
+
+		// Check if there are any links at all for the given step
+		linksPerStep, ok := stepsMetadata[step.Name]
+		if !ok || len(linksPerStep) < 1 {
+			stepErr = fmt.Errorf("no links found")
+		}
+
+		// For each link corresponding to a step, check that the signer key was
+		// authorized, the layout contains a verification key and the signature
+		// verification passes. Only good links are stored, to verify thresholds
+		// below.
+		isAuthorizedSignature := false
+		for signerKeyID, linkEnv := range linksPerStep {
+			for _, authorizedKeyID := range step.PubKeys {
+				if signerKeyID == authorizedKeyID {
+					if verifierKey, ok := layout.Keys[authorizedKeyID]; ok {
+						if err := linkEnv.VerifySignature(verifierKey); err == nil {
+							linksPerStepVerified[signerKeyID] = linkEnv
+							isAuthorizedSignature = true
+							break
+						}
+					}
+				}
+			}
+
+			// If the signer's key wasn't in our step's pubkeys array, check the cert pool to
+			// see if the key is known to us.
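+			// This covers functionaries identified by an X.509 certificate
+			// issued under one of the layout's CAs rather than by a key
+			// listed in the step's PubKeys.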
+			if !isAuthorizedSignature {
+				sig, err := linkEnv.GetSignatureForKeyID(signerKeyID)
+				if err != nil {
+					stepErr = err
+					continue
+				}
+
+				cert, err := sig.GetCertificate()
+				if err != nil {
+					stepErr = err
+					continue
+				}
+
+				// test certificate against the step's constraints to make sure it's a valid functionary
+				err = step.CheckCertConstraints(cert, layout.RootCAIDs(), rootCertPool, intermediateCertPool)
+				if err != nil {
+					stepErr = err
+					continue
+				}
+
+				err = linkEnv.VerifySignature(cert)
+				if err != nil {
+					stepErr = err
+					continue
+				}
+
+				linksPerStepVerified[signerKeyID] = linkEnv
+			}
+		}
+
+		// Store all good links for a step
+		stepsMetadataVerified[step.Name] = linksPerStepVerified
+
+		if len(linksPerStepVerified) < step.Threshold {
+			linksPerStep := stepsMetadata[step.Name]
+			return nil, fmt.Errorf("step '%s' requires '%d' link metadata file(s)."+
+				" '%d' out of '%d' available link(s) have a valid signature from an"+
+				" authorized signer: %v", step.Name, step.Threshold,
+				len(linksPerStepVerified), len(linksPerStep), stepErr)
+		}
+	}
+	return stepsMetadataVerified, nil
+}
+
+/*
+LoadLinksForLayout loads for every Step of the passed Layout a Metablock
+containing the corresponding Link. A base path to a directory that contains
+the links may be passed using linkDir. Link file names are constructed
+using LinkNameFormat together with the corresponding step name and authorized
+functionary key ids. A map of link metadata is returned and has the following
+format:
+
+	{
+		"<step name>": {
+			"<key id>": Metablock,
+			"<key id>": Metablock,
+			...
+		},
+		"<step name>": {
+			"<key id>": Metablock,
+			"<key id>": Metablock,
+			...
+		},
+		...
+	}
+
+If a link cannot be loaded at a constructed link name or is invalid, it is
+ignored. Only a preliminary threshold check is performed, that is, if there
+aren't at least Threshold links for any given step, the first return value
+is an empty map of Metablock maps and the second return value is the error.
+*/
+func LoadLinksForLayout(layout Layout, linkDir string) (map[string]map[string]Metadata, error) {
+	stepsMetadata := make(map[string]map[string]Metadata)
+
+	for _, step := range layout.Steps {
+		linksPerStep := make(map[string]Metadata)
+		// Since we can verify against certificates belonging to a CA, we need to
+		// load any possible links
+		linkFiles, err := filepath.Glob(path.Join(linkDir, fmt.Sprintf(LinkGlobFormat, step.Name)))
+		if err != nil {
+			return nil, err
+		}
+
+		for _, linkPath := range linkFiles {
+			linkEnv, err := LoadMetadata(linkPath)
+			if err != nil {
+				continue
+			}
+
+			// To get the full key from the metadata's signatures, we have to check
+			// for one with the same short id...
+			signerShortKeyID := strings.TrimSuffix(strings.TrimPrefix(filepath.Base(linkPath), step.Name+"."), ".link")
+			for _, sig := range linkEnv.Sigs() {
+				if strings.HasPrefix(sig.KeyID, signerShortKeyID) {
+					linksPerStep[sig.KeyID] = linkEnv
+					break
+				}
+			}
+		}
+
+		if len(linksPerStep) < step.Threshold {
+			return nil, fmt.Errorf("step '%s' requires '%d' link metadata file(s),"+
+				" found '%d'", step.Name, step.Threshold, len(linksPerStep))
+		}
+
+		stepsMetadata[step.Name] = linksPerStep
+	}
+
+	return stepsMetadata, nil
+}
+
+/*
+VerifyLayoutExpiration verifies that the passed Layout has not expired. It
+returns an error if the (zulu) date in the Expires field is in the past.
+*/
+func VerifyLayoutExpiration(layout Layout) error {
+	expires, err := time.Parse(ISO8601DateSchema, layout.Expires)
+	if err != nil {
+		return err
+	}
+	// Uses timezone of expires, i.e.
UTC
+	if time.Until(expires) < 0 {
+		return fmt.Errorf("layout has expired on '%s'", expires)
+	}
+	return nil
+}
+
+/*
+VerifyLayoutSignatures verifies for each key in the passed key map the
+corresponding signature of the Layout in the passed Metablock's Signed field.
+Signatures and keys are associated by key id. If the key map is empty, or the
+Metablock's Signature field does not have a signature for one or more of the
+passed keys, or a matching signature is invalid, an error is returned.
+*/
+func VerifyLayoutSignatures(layoutEnv Metadata,
+	layoutKeys map[string]Key) error {
+	if len(layoutKeys) < 1 {
+		return fmt.Errorf("layout verification requires at least one key")
+	}
+
+	for _, key := range layoutKeys {
+		if err := layoutEnv.VerifySignature(key); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+/*
+GetSummaryLink merges the materials of the first step (as mentioned in the
+layout) and the products of the last step and returns a new link. This link
+reports the materials and products and summarizes the overall software supply
+chain.
+NOTE: The assumption is that the steps mentioned in the layout are to be
+performed sequentially. So, the first step mentioned in the layout denotes what
+comes into the supply chain and the last step denotes what goes out.
+*/
+func GetSummaryLink(layout Layout, stepsMetadataReduced map[string]Metadata,
+	stepName string, useDSSE bool) (Metadata, error) {
+	var summaryLink Link
+	if len(layout.Steps) > 0 {
+		firstStepLink := stepsMetadataReduced[layout.Steps[0].Name]
+		lastStepLink := stepsMetadataReduced[layout.Steps[len(layout.Steps)-1].Name]
+
+		summaryLink.Materials = firstStepLink.GetPayload().(Link).Materials
+		summaryLink.Name = stepName
+		summaryLink.Type = firstStepLink.GetPayload().(Link).Type
+
+		summaryLink.Products = lastStepLink.GetPayload().(Link).Products
+		summaryLink.ByProducts = lastStepLink.GetPayload().(Link).ByProducts
+		// Using the last command of the sublayout as the command
+		// of the summary link can be misleading. Is it necessary to
+		// include all the commands executed as part of sublayout?
+		summaryLink.Command = lastStepLink.GetPayload().(Link).Command
+	}
+
+	if useDSSE {
+		env := &Envelope{}
+		if err := env.SetPayload(summaryLink); err != nil {
+			return nil, err
+		}
+
+		return env, nil
+	}
+
+	return &Metablock{Signed: summaryLink}, nil
+}
+
+/*
+VerifySublayouts checks if any step in the supply chain is a sublayout, and if
+so, recursively resolves it and replaces it with a summary link summarizing the
+steps carried out in the sublayout.
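+Sublayout links are loaded from a subdirectory of superLayoutLinkPath whose
+name is derived from the step name and the signing key id (see
+SublayoutLinkDirFormat).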
+*/
+func VerifySublayouts(layout Layout,
+	stepsMetadataVerified map[string]map[string]Metadata,
+	superLayoutLinkPath string, intermediatePems [][]byte, lineNormalization bool) (map[string]map[string]Metadata, error) {
+	for stepName, linkData := range stepsMetadataVerified {
+		for keyID, metadata := range linkData {
+			if _, ok := metadata.GetPayload().(Layout); ok {
+				layoutKeys := make(map[string]Key)
+				layoutKeys[keyID] = layout.Keys[keyID]
+
+				sublayoutLinkDir := fmt.Sprintf(SublayoutLinkDirFormat,
+					stepName, keyID)
+				sublayoutLinkPath := filepath.Join(superLayoutLinkPath,
+					sublayoutLinkDir)
+				summaryLink, err := InTotoVerify(metadata, layoutKeys,
+					sublayoutLinkPath, stepName, make(map[string]string), intermediatePems, lineNormalization)
+				if err != nil {
+					return nil, err
+				}
+				linkData[keyID] = summaryLink
+			}
+
+		}
+	}
+	return stepsMetadataVerified, nil
+}
+
+// TODO: find a better way than two helper functions for the replacer op
+
+func substituteParametersInSlice(replacer *strings.Replacer, slice []string) []string {
+	newSlice := make([]string, 0)
+	for _, item := range slice {
+		newSlice = append(newSlice, replacer.Replace(item))
+	}
+	return newSlice
+}
+
+func substituteParametersInSliceOfSlices(replacer *strings.Replacer,
+	slice [][]string) [][]string {
+	newSlice := make([][]string, 0)
+	for _, item := range slice {
+		newSlice = append(newSlice, substituteParametersInSlice(replacer,
+			item))
+	}
+	return newSlice
+}
+
+/*
+SubstituteParameters performs parameter substitution in steps and inspections
+in the following fields:
+- Expected Materials and Expected Products of both
+- Run of inspections
+- Expected Command of steps
+The substitution marker is '{}' and the keyword within the braces is replaced
+by a value found in the substitution map passed, parameterDictionary. The
+layout with parameters substituted is returned to the calling function.
+*/
+func SubstituteParameters(layout Layout,
+	parameterDictionary map[string]string) (Layout, error) {
+
+	if len(parameterDictionary) == 0 {
+		return layout, nil
+	}
+
+	parameters := make([]string, 0)
+
+	re := regexp.MustCompile("^[a-zA-Z0-9_-]+$")
+
+	for parameter, value := range parameterDictionary {
+		parameterFormatCheck := re.MatchString(parameter)
+		if !parameterFormatCheck {
+			return layout, fmt.Errorf("invalid format for parameter")
+		}
+
+		parameters = append(parameters, "{"+parameter+"}")
+		parameters = append(parameters, value)
+	}
+
+	replacer := strings.NewReplacer(parameters...)
+
+	for i := range layout.Steps {
+		layout.Steps[i].ExpectedMaterials = substituteParametersInSliceOfSlices(
+			replacer, layout.Steps[i].ExpectedMaterials)
+		layout.Steps[i].ExpectedProducts = substituteParametersInSliceOfSlices(
+			replacer, layout.Steps[i].ExpectedProducts)
+		layout.Steps[i].ExpectedCommand = substituteParametersInSlice(replacer,
+			layout.Steps[i].ExpectedCommand)
+	}
+
+	for i := range layout.Inspect {
+		layout.Inspect[i].ExpectedMaterials =
+			substituteParametersInSliceOfSlices(replacer,
+				layout.Inspect[i].ExpectedMaterials)
+		layout.Inspect[i].ExpectedProducts =
+			substituteParametersInSliceOfSlices(replacer,
+				layout.Inspect[i].ExpectedProducts)
+		layout.Inspect[i].Run = substituteParametersInSlice(replacer,
+			layout.Inspect[i].Run)
+	}
+
+	return layout, nil
+}
+
+/*
+InTotoVerify can be used to verify an entire software supply chain according to
+the in-toto specification.
It requires the metadata of the root layout, a map
+that contains public keys to verify the root layout signatures, a path to a
+directory from where it can load link metadata files, which are treated as
+signed evidence for the steps defined in the layout, a step name, and a
+parameter dictionary used for parameter substitution. The step name only
+matters for sublayouts, where it's important to associate the summary of that
+step with a unique name. The verification routine is as follows:
+
+1. Verify layout signature(s) using passed key(s)
+2. Verify layout expiration date
+3. Substitute parameters in layout
+4. Load link metadata files for steps of layout
+5. Verify signatures and signature thresholds for steps of layout
+6. Verify sublayouts recursively
+7. Verify command alignment for steps of layout (only warns)
+8. Verify artifact rules for steps of layout
+9. Execute inspection commands (generates link metadata for each inspection)
+10. Verify artifact rules for inspections of layout
+
+InTotoVerify returns a summary link wrapped in a Metadata container and an
+error value. If any of the verification routines fail, verification is
+aborted and an error is returned. In that case, the returned Metadata value
+is nil.
+
+NOTE: Artifact rules of type "create", "modify"
+and "delete" are currently not supported.
+*/
+func InTotoVerify(layoutEnv Metadata, layoutKeys map[string]Key,
+	linkDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) (
+	Metadata, error) {
+
+	// Verify root signatures
+	if err := VerifyLayoutSignatures(layoutEnv, layoutKeys); err != nil {
+		return nil, err
+	}
+
+	useDSSE := false
+	if _, ok := layoutEnv.(*Envelope); ok {
+		useDSSE = true
+	}
+
+	// Extract the layout from its Metadata container (for further processing)
+	layout, ok := layoutEnv.GetPayload().(Layout)
+	if !ok {
+		return nil, ErrNotLayout
+	}
+
+	// Verify layout expiration
+	if err := VerifyLayoutExpiration(layout); err != nil {
+		return nil, err
+	}
+
+	// Substitute parameters in layout
+	layout, err := SubstituteParameters(layout, parameterDictionary)
+	if err != nil {
+		return nil, err
+	}
+
+	rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems)
+	if err != nil {
+		return nil, err
+	}
+
+	// Load links for layout
+	stepsMetadata, err := LoadLinksForLayout(layout, linkDir)
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify link signatures
+	stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout,
+		stepsMetadata, rootCertPool, intermediateCertPool)
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify and resolve sublayouts
+	stepsSublayoutVerified, err := VerifySublayouts(layout,
+		stepsMetadataVerified, linkDir, intermediatePems, lineNormalization)
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify command alignment (WARNING only)
+	VerifyStepCommandAlignment(layout, stepsSublayoutVerified)
+
+	// Given that signature thresholds have been checked above and the rest of
+	// the relevant link properties, i.e. materials and products, have to be
+	// exactly equal, we can reduce the map of steps metadata. However, we error
+	// if the relevant properties are not equal among links of a step.
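+	// (Note: ReduceStepsMetadata collapses the per-functionary link map to a
+	// single Metadata entry per step.)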
+ stepsMetadataReduced, err := ReduceStepsMetadata(layout, + stepsSublayoutVerified) + if err != nil { + return nil, err + } + + // Verify artifact rules + if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(), + stepsMetadataReduced); err != nil { + return nil, err + } + + inspectionMetadata, err := RunInspections(layout, "", lineNormalization, useDSSE) + if err != nil { + return nil, err + } + + // Add steps metadata to inspection metadata, because inspection artifact + // rules may also refer to artifacts reported by step links + for k, v := range stepsMetadataReduced { + inspectionMetadata[k] = v + } + + if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(), + inspectionMetadata); err != nil { + return nil, err + } + + summaryLink, err := GetSummaryLink(layout, stepsMetadataReduced, stepName, useDSSE) + if err != nil { + return nil, err + } + + return summaryLink, nil +} + +/* +InTotoVerifyWithDirectory provides the same functionality as InTotoVerify, but +adds the possibility to select a local directory from where the inspections are run. +*/ +func InTotoVerifyWithDirectory(layoutEnv Metadata, layoutKeys map[string]Key, + linkDir string, runDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) ( + Metadata, error) { + + // runDir sanity checks + // check if path exists + info, err := os.Stat(runDir) + if err != nil { + return nil, err + } + + // check if runDir is a symlink + if info.Mode()&os.ModeSymlink == os.ModeSymlink { + return nil, ErrInspectionRunDirIsSymlink + } + + // check if runDir is writable and a directory + err = isWritable(runDir) + if err != nil { + return nil, err + } + + // check if runDir is empty (we do not want to overwrite files) + // We abuse File.Readdirnames for this action. 
+	f, err := os.Open(runDir)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	// We use Readdirnames(1) for performance reasons: one child node is
+	// enough to prove that the directory is not empty, and it returns
+	// io.EOF if and only if the directory is empty.
+	_, err = f.Readdirnames(1)
+	if err == nil {
+		return nil, fmt.Errorf("inspection run directory '%s' is not empty", runDir)
+	}
+	if err != io.EOF {
+		return nil, err
+	}
+	err = f.Close()
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify root signatures
+	if err := VerifyLayoutSignatures(layoutEnv, layoutKeys); err != nil {
+		return nil, err
+	}
+
+	useDSSE := false
+	if _, ok := layoutEnv.(*Envelope); ok {
+		useDSSE = true
+	}
+
+	// Extract the layout from its Metadata container (for further processing)
+	layout, ok := layoutEnv.GetPayload().(Layout)
+	if !ok {
+		return nil, ErrNotLayout
+	}
+
+	// Verify layout expiration
+	if err := VerifyLayoutExpiration(layout); err != nil {
+		return nil, err
+	}
+
+	// Substitute parameters in layout
+	layout, err = SubstituteParameters(layout, parameterDictionary)
+	if err != nil {
+		return nil, err
+	}
+
+	rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems)
+	if err != nil {
+		return nil, err
+	}
+
+	// Load links for layout
+	stepsMetadata, err := LoadLinksForLayout(layout, linkDir)
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify link signatures
+	stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout,
+		stepsMetadata, rootCertPool, intermediateCertPool)
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify and resolve sublayouts
+	stepsSublayoutVerified, err := VerifySublayouts(layout,
+		stepsMetadataVerified, linkDir, intermediatePems, lineNormalization)
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify command alignment (WARNING only)
+	VerifyStepCommandAlignment(layout, stepsSublayoutVerified)
+
+	// Given that signature thresholds have been checked above and the rest of
+	// the relevant link properties, i.e. materials and products, have to be
+	// exactly equal, we can reduce the map of steps metadata. However, we error
+	// if the relevant properties are not equal among links of a step.
+	stepsMetadataReduced, err := ReduceStepsMetadata(layout,
+		stepsSublayoutVerified)
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify artifact rules
+	if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(),
+		stepsMetadataReduced); err != nil {
+		return nil, err
+	}
+
+	inspectionMetadata, err := RunInspections(layout, runDir, lineNormalization, useDSSE)
+	if err != nil {
+		return nil, err
+	}
+
+	// Add steps metadata to inspection metadata, because inspection artifact
+	// rules may also refer to artifacts reported by step links
+	for k, v := range stepsMetadataReduced {
+		inspectionMetadata[k] = v
+	}
+
+	if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(),
+		inspectionMetadata); err != nil {
+		return nil, err
+	}
+
+	summaryLink, err := GetSummaryLink(layout, stepsMetadataReduced, stepName, useDSSE)
+	if err != nil {
+		return nil, err
+	}
+
+	return summaryLink, nil
+}
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 244ee19c4bf..af2ef639536 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -27,6 +27,16 @@ Use the links above for more information on each.
# changelog +* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1) + * zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079 + * zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059 + * s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080 + * zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086 + * gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090 + * flate: Faster load+store https://github.com/klauspost/compress/pull/1104 + * flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101 + * flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103 + * Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 @@ -36,6 +46,9 @@ Use the links above for more information on each. * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 +
+ See changes to v1.17.x + * Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11) * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017 * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014 @@ -102,7 +115,8 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 - + +
See changes to v1.16.x @@ -669,3 +683,4 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv # license This code is licensed under the same conditions as the original Go code. See LICENSE file. + diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 4e92f5998a8..57d17eeab9e 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -421,7 +421,9 @@ func (d *compressor) deflateLazy() { d.h = newHuffmanEncoder(maxFlateBlockTokens) } var tmp [256]uint16 - for _, v := range d.window[s.index:d.windowEnd] { + toIndex := d.window[s.index:d.windowEnd] + toIndex = toIndex[:min(len(toIndex), maxFlateBlockTokens)] + for _, v := range toIndex { tmp[v]++ } d.h.generate(tmp[:], 15) diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index 03a1796979b..7151140ccd7 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -646,7 +646,7 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b w.lastHeader = 0 } - numLiterals, numOffsets := w.indexTokens(tokens, fillReuse && !sync) + numLiterals, numOffsets := w.indexTokens(tokens, true) extraBits := 0 ssize, storable := w.storedSize(input) @@ -781,7 +781,7 @@ func (w *huffmanBitWriter) fillTokens() { // literalFreq and offsetFreq, and generates literalEncoding // and offsetEncoding. // The number of literal and offset tokens is returned. -func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { +func (w *huffmanBitWriter) indexTokens(t *tokens, alwaysEOB bool) (numLiterals, numOffsets int) { //copy(w.literalFreq[:], t.litHist[:]) *(*[256]uint16)(w.literalFreq[:]) = t.litHist //copy(w.literalFreq[256:], t.extraHist[:]) @@ -791,9 +791,10 @@ func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, num if t.n == 0 { return } - if filled { - return maxNumLit, maxNumDist + if alwaysEOB { + w.literalFreq[endBlockMarker] = 1 } + // get the number of literals numLiterals = len(w.literalFreq) for w.literalFreq[numLiterals-1] == 0 { diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 90b74f7acdd..455ed3e2b56 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -61,13 +61,19 @@ var bitWriterPool = sync.Pool{ }, } +// tokensPool contains tokens struct objects that can be reused +var tokensPool = sync.Pool{ + New: func() any { + return &tokens{} + }, +} + // StatelessDeflate allows compressing directly to a Writer without retaining state. // When returning everything will be flushed. // Up to 8KB of an optional dictionary can be given which is presumed to precede the block. // Longer dictionaries will be truncated and will still produce valid output. // Sending nil dictionary is perfectly fine. func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { - var dst tokens bw := bitWriterPool.Get().(*huffmanBitWriter) bw.reset(out) defer func() { @@ -91,6 +97,12 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { // For subsequent loops, keep shallow dict reference to avoid alloc+copy. 
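+	// Note: dst below now comes from tokensPool rather than the stack; the
+	// tokens struct is large, so pooling avoids a per-call allocation.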
var inDict []byte + dst := tokensPool.Get().(*tokens) + dst.Reset() + defer func() { + tokensPool.Put(dst) + }() + for len(in) > 0 { todo := in if len(inDict) > 0 { @@ -113,9 +125,9 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { } // Compress if len(inDict) == 0 { - statelessEnc(&dst, todo, int16(len(dict))) + statelessEnc(dst, todo, int16(len(dict))) } else { - statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) + statelessEnc(dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) } isEof := eof && len(in) == 0 @@ -129,7 +141,7 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { // If we removed less than 1/16th, huffman compress the block. bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) } else { - bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) + bw.writeBlockDynamic(dst, isEof, uncompressed, len(in) == 0) } if len(in) > 0 { // Retain a dict if we have more diff --git a/vendor/github.com/oklog/ulid/.gitignore b/vendor/github.com/oklog/ulid/.gitignore new file mode 100644 index 00000000000..c92c4d56084 --- /dev/null +++ b/vendor/github.com/oklog/ulid/.gitignore @@ -0,0 +1,29 @@ +#### joe made this: http://goel.io/joe + +#####=== Go ===##### + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + diff --git a/vendor/github.com/oklog/ulid/.travis.yml b/vendor/github.com/oklog/ulid/.travis.yml new file mode 100644 index 00000000000..43eb762fa34 --- /dev/null +++ b/vendor/github.com/oklog/ulid/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: false +go: + - 1.10.x +install: + - go get -v github.com/golang/lint/golint + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - go get -d -t -v ./... + - go build -v ./... +script: + - go vet ./... + - $HOME/gopath/bin/golint . + - go test -v -race ./... + - go test -v -covermode=count -coverprofile=cov.out + - $HOME/gopath/bin/goveralls -coverprofile=cov.out -service=travis-ci -repotoken "$COVERALLS_TOKEN" || true diff --git a/vendor/github.com/oklog/ulid/AUTHORS.md b/vendor/github.com/oklog/ulid/AUTHORS.md new file mode 100644 index 00000000000..95581c78b06 --- /dev/null +++ b/vendor/github.com/oklog/ulid/AUTHORS.md @@ -0,0 +1,2 @@ +- Peter Bourgon (@peterbourgon) +- Tomás Senart (@tsenart) diff --git a/vendor/github.com/oklog/ulid/CHANGELOG.md b/vendor/github.com/oklog/ulid/CHANGELOG.md new file mode 100644 index 00000000000..8da38c6b00d --- /dev/null +++ b/vendor/github.com/oklog/ulid/CHANGELOG.md @@ -0,0 +1,33 @@ +## 1.3.1 / 2018-10-02 + +* Use underlying entropy source for random increments in Monotonic (#32) + +## 1.3.0 / 2018-09-29 + +* Monotonic entropy support (#31) + +## 1.2.0 / 2018-09-09 + +* Add a function to convert Unix time in milliseconds back to time.Time (#30) + +## 1.1.0 / 2018-08-15 + +* Ensure random part is always read from the entropy reader in full (#28) + +## 1.0.0 / 2018-07-29 + +* Add ParseStrict and MustParseStrict functions (#26) +* Enforce overflow checking when parsing (#20) + +## 0.3.0 / 2017-01-03 + +* Implement ULID.Compare method + +## 0.2.0 / 2016-12-13 + +* Remove year 2262 Timestamp bug. (#1) +* Gracefully handle invalid encodings when parsing. 
+ +## 0.1.0 / 2016-12-06 + +* First ULID release diff --git a/vendor/github.com/oklog/ulid/CONTRIBUTING.md b/vendor/github.com/oklog/ulid/CONTRIBUTING.md new file mode 100644 index 00000000000..68f03f26eba --- /dev/null +++ b/vendor/github.com/oklog/ulid/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +We use GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first propose your ideas + in a Github issue. This will avoid unnecessary work and surely give + you and us a good deal of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/oklog/ulid/Gopkg.lock b/vendor/github.com/oklog/ulid/Gopkg.lock new file mode 100644 index 00000000000..349b449a6ea --- /dev/null +++ b/vendor/github.com/oklog/ulid/Gopkg.lock @@ -0,0 +1,15 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + branch = "master" + name = "github.com/pborman/getopt" + packages = ["v2"] + revision = "7148bc3a4c3008adfcab60cbebfd0576018f330b" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "6779b05abd5cd429c5393641d2453005a3cb74a400d161b2b5c5d0ca2e10e116" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/oklog/ulid/Gopkg.toml b/vendor/github.com/oklog/ulid/Gopkg.toml new file mode 100644 index 00000000000..624a7a019c7 --- /dev/null +++ b/vendor/github.com/oklog/ulid/Gopkg.toml @@ -0,0 +1,26 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + branch = "master" + name = "github.com/pborman/getopt" diff --git a/vendor/github.com/oklog/ulid/LICENSE b/vendor/github.com/oklog/ulid/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/oklog/ulid/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
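Reviewer note on the stateless.go hunk a few files above: it replaces StatelessDeflate's stack-allocated `tokens` scratch struct with a `sync.Pool`. For anyone unfamiliar with that API, here is a minimal usage sketch, not part of this diff; the input bytes and printed messages are illustrative only:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var out bytes.Buffer
	in := []byte("hello, hello, hello gopher") // illustrative input

	// One-shot, stateless compression: eof=true emits the final block,
	// and a nil dictionary is explicitly allowed by the API.
	if err := flate.StatelessDeflate(&out, in, true, nil); err != nil {
		fmt.Println("compress failed:", err)
		return
	}
	fmt.Printf("compressed %d bytes to %d bytes\n", len(in), out.Len())
}
```

When an `io.WriteCloser` is more convenient than a one-shot call, the package also exposes `flate.NewStatelessWriter`, which wraps the same code path.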
diff --git a/vendor/github.com/oklog/ulid/README.md b/vendor/github.com/oklog/ulid/README.md
new file mode 100644
index 00000000000..0a3d2f82b25
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/README.md
@@ -0,0 +1,150 @@
+# Universally Unique Lexicographically Sortable Identifier
+
+![Project status](https://img.shields.io/badge/version-1.3.0-yellow.svg)
+[![Build Status](https://secure.travis-ci.org/oklog/ulid.png)](http://travis-ci.org/oklog/ulid)
+[![Go Report Card](https://goreportcard.com/badge/oklog/ulid?cache=0)](https://goreportcard.com/report/oklog/ulid)
+[![Coverage Status](https://coveralls.io/repos/github/oklog/ulid/badge.svg?branch=master&cache=0)](https://coveralls.io/github/oklog/ulid?branch=master)
+[![GoDoc](https://godoc.org/github.com/oklog/ulid?status.svg)](https://godoc.org/github.com/oklog/ulid)
+[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE)
+
+A Go port of [alizain/ulid](https://github.com/alizain/ulid) with binary format implemented.
+
+## Background
+
+A GUID/UUID can be suboptimal for many use-cases because:
+
+- It isn't the most character efficient way of encoding 128 bits
+- UUID v1/v2 is impractical in many environments, as it requires access to a unique, stable MAC address
+- UUID v3/v5 requires a unique seed and produces randomly distributed IDs, which can cause fragmentation in many data structures
+- UUID v4 provides no other information than randomness, which can cause fragmentation in many data structures
+
+A ULID, however:
+
+- Is compatible with UUIDs/GUIDs
+- 1.21e+24 unique ULIDs per millisecond (1,208,925,819,614,629,174,706,176 to be exact)
+- Lexicographically sortable
+- Canonically encoded as a 26 character string, as opposed to the 36 character UUID
+- Uses Crockford's base32 for better efficiency and readability (5 bits per character)
+- Case insensitive
+- No special characters (URL safe)
+- Monotonic sort order (correctly detects and handles the same millisecond)
+
+## Install
+
+```shell
+go get github.com/oklog/ulid
+```
+
+## Usage
+
+A ULID is constructed with a `time.Time` and an `io.Reader` entropy source.
+This design allows for greater flexibility in choosing your trade-offs.
+
+Please note that `rand.Rand` from the `math` package is *not* safe for concurrent use.
+Instantiate one per long-lived goroutine, or use a `sync.Pool` if you want to avoid the potential contention of a locked `rand.Source`, as has been frequently observed with the package-level functions.
+
+
+```go
+func ExampleULID() {
+	t := time.Unix(1000000, 0)
+	entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)
+	fmt.Println(ulid.MustNew(ulid.Timestamp(t), entropy))
+	// Output: 0000XSNJG0MQJHBF4QX1EFD6Y3
+}
+
+```
+
+## Specification
+
+Below is the current specification of ULID as implemented in this repository.
+
+### Components
+
+**Timestamp**
+- 48 bits
+- UNIX-time in milliseconds
+- Won't run out of space till the year 10895 AD
+
+**Entropy**
+- 80 bits
+- User defined entropy source.
+- Monotonicity within the same millisecond with [`ulid.Monotonic`](https://godoc.org/github.com/oklog/ulid#Monotonic)
+
+### Encoding
+
+[Crockford's Base32](http://www.crockford.com/wrmg/base32.html) is used as shown.
+This alphabet excludes the letters I, L, O, and U to avoid confusion and abuse.
+
+```
+0123456789ABCDEFGHJKMNPQRSTVWXYZ
+```
+
+### Binary Layout and Byte Order
+
+The components are encoded as 16 octets.
Each component is encoded with the Most Significant Byte first (network byte order). + +``` +0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_time_high | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 16_bit_uint_time_low | 16_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +``` + +### String Representation + +``` + 01AN4Z07BY 79KA1307SR9X4MV3 +|----------| |----------------| + Timestamp Entropy + 10 chars 16 chars + 48bits 80bits + base32 base32 +``` + +## Test + +```shell +go test ./... +``` + +## Benchmarks + +On a Intel Core i7 Ivy Bridge 2.7 GHz, MacOS 10.12.1 and Go 1.8.0beta1 + +``` +BenchmarkNew/WithCryptoEntropy-8 2000000 771 ns/op 20.73 MB/s 16 B/op 1 allocs/op +BenchmarkNew/WithEntropy-8 20000000 65.8 ns/op 243.01 MB/s 16 B/op 1 allocs/op +BenchmarkNew/WithoutEntropy-8 50000000 30.0 ns/op 534.06 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithCryptoEntropy-8 2000000 781 ns/op 20.48 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithEntropy-8 20000000 70.0 ns/op 228.51 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithoutEntropy-8 50000000 34.6 ns/op 462.98 MB/s 16 B/op 1 allocs/op +BenchmarkParse-8 50000000 30.0 ns/op 866.16 MB/s 0 B/op 0 allocs/op +BenchmarkMustParse-8 50000000 35.2 ns/op 738.94 MB/s 0 B/op 0 allocs/op +BenchmarkString-8 20000000 64.9 ns/op 246.40 MB/s 32 B/op 1 allocs/op +BenchmarkMarshal/Text-8 20000000 55.8 ns/op 286.84 MB/s 32 B/op 1 allocs/op +BenchmarkMarshal/TextTo-8 100000000 22.4 ns/op 714.91 MB/s 0 B/op 0 allocs/op +BenchmarkMarshal/Binary-8 300000000 4.02 ns/op 3981.77 MB/s 0 B/op 0 allocs/op +BenchmarkMarshal/BinaryTo-8 2000000000 1.18 ns/op 13551.75 MB/s 0 B/op 0 allocs/op +BenchmarkUnmarshal/Text-8 100000000 20.5 ns/op 1265.27 MB/s 0 B/op 0 allocs/op +BenchmarkUnmarshal/Binary-8 300000000 4.94 ns/op 3240.01 MB/s 0 B/op 0 allocs/op +BenchmarkNow-8 100000000 15.1 ns/op 528.09 MB/s 0 B/op 0 allocs/op +BenchmarkTimestamp-8 2000000000 0.29 ns/op 27271.59 MB/s 0 B/op 0 allocs/op +BenchmarkTime-8 2000000000 0.58 ns/op 13717.80 MB/s 0 B/op 0 allocs/op +BenchmarkSetTime-8 2000000000 0.89 ns/op 9023.95 MB/s 0 B/op 0 allocs/op +BenchmarkEntropy-8 200000000 7.62 ns/op 1311.66 MB/s 0 B/op 0 allocs/op +BenchmarkSetEntropy-8 2000000000 0.88 ns/op 11376.54 MB/s 0 B/op 0 allocs/op +BenchmarkCompare-8 200000000 7.34 ns/op 4359.23 MB/s 0 B/op 0 allocs/op +``` + +## Prior Art + +- [alizain/ulid](https://github.com/alizain/ulid) +- [RobThree/NUlid](https://github.com/RobThree/NUlid) +- [imdario/go-ulid](https://github.com/imdario/go-ulid) diff --git a/vendor/github.com/oklog/ulid/ulid.go b/vendor/github.com/oklog/ulid/ulid.go new file mode 100644 index 00000000000..c5d0d66fd2a --- /dev/null +++ b/vendor/github.com/oklog/ulid/ulid.go @@ -0,0 +1,614 @@ +// Copyright 2016 The Oklog Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ulid + +import ( + "bufio" + "bytes" + "database/sql/driver" + "encoding/binary" + "errors" + "io" + "math" + "math/bits" + "math/rand" + "time" +) + +/* +An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier + + The components are encoded as 16 octets. + Each component is encoded with the MSB first (network byte order). + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 16_bit_uint_time_low | 16_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +*/ +type ULID [16]byte + +var ( + // ErrDataSize is returned when parsing or unmarshaling ULIDs with the wrong + // data size. + ErrDataSize = errors.New("ulid: bad data size when unmarshaling") + + // ErrInvalidCharacters is returned when parsing or unmarshaling ULIDs with + // invalid Base32 encodings. + ErrInvalidCharacters = errors.New("ulid: bad data characters when unmarshaling") + + // ErrBufferSize is returned when marshalling ULIDs to a buffer of insufficient + // size. + ErrBufferSize = errors.New("ulid: bad buffer size when marshaling") + + // ErrBigTime is returned when constructing an ULID with a time that is larger + // than MaxTime. + ErrBigTime = errors.New("ulid: time too big") + + // ErrOverflow is returned when unmarshaling a ULID whose first character is + // larger than 7, thereby exceeding the valid bit depth of 128. + ErrOverflow = errors.New("ulid: overflow when unmarshaling") + + // ErrMonotonicOverflow is returned by a Monotonic entropy source when + // incrementing the previous ULID's entropy bytes would result in overflow. + ErrMonotonicOverflow = errors.New("ulid: monotonic entropy overflow") + + // ErrScanValue is returned when the value passed to scan cannot be unmarshaled + // into the ULID. + ErrScanValue = errors.New("ulid: source value must be a string or byte slice") +) + +// New returns an ULID with the given Unix milliseconds timestamp and an +// optional entropy source. Use the Timestamp function to convert +// a time.Time to Unix milliseconds. +// +// ErrBigTime is returned when passing a timestamp bigger than MaxTime. +// Reading from the entropy source may also return an error. +func New(ms uint64, entropy io.Reader) (id ULID, err error) { + if err = id.SetTime(ms); err != nil { + return id, err + } + + switch e := entropy.(type) { + case nil: + return id, err + case *monotonic: + err = e.MonotonicRead(ms, id[6:]) + default: + _, err = io.ReadFull(e, id[6:]) + } + + return id, err +} + +// MustNew is a convenience function equivalent to New that panics on failure +// instead of returning an error. +func MustNew(ms uint64, entropy io.Reader) ULID { + id, err := New(ms, entropy) + if err != nil { + panic(err) + } + return id +} + +// Parse parses an encoded ULID, returning an error in case of failure. +// +// ErrDataSize is returned if the len(ulid) is different from an encoded +// ULID's length. Invalid encodings produce undefined ULIDs. For a version that +// returns an error instead, see ParseStrict. 
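+//
+// A usage sketch (the literal is the example ULID from the README):
+//
+//	id, err := ulid.Parse("01AN4Z07BY79KA1307SR9X4MV3")
+//	if err != nil {
+//		// Only ErrDataSize is reported here; use ParseStrict to catch
+//		// invalid characters as well.
+//	}
+//	_ = id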
+func Parse(ulid string) (id ULID, err error) { + return id, parse([]byte(ulid), false, &id) +} + +// ParseStrict parses an encoded ULID, returning an error in case of failure. +// +// It is like Parse, but additionally validates that the parsed ULID consists +// only of valid base32 characters. It is slightly slower than Parse. +// +// ErrDataSize is returned if the len(ulid) is different from an encoded +// ULID's length. Invalid encodings return ErrInvalidCharacters. +func ParseStrict(ulid string) (id ULID, err error) { + return id, parse([]byte(ulid), true, &id) +} + +func parse(v []byte, strict bool, id *ULID) error { + // Check if a base32 encoded ULID is the right length. + if len(v) != EncodedSize { + return ErrDataSize + } + + // Check if all the characters in a base32 encoded ULID are part of the + // expected base32 character set. + if strict && + (dec[v[0]] == 0xFF || + dec[v[1]] == 0xFF || + dec[v[2]] == 0xFF || + dec[v[3]] == 0xFF || + dec[v[4]] == 0xFF || + dec[v[5]] == 0xFF || + dec[v[6]] == 0xFF || + dec[v[7]] == 0xFF || + dec[v[8]] == 0xFF || + dec[v[9]] == 0xFF || + dec[v[10]] == 0xFF || + dec[v[11]] == 0xFF || + dec[v[12]] == 0xFF || + dec[v[13]] == 0xFF || + dec[v[14]] == 0xFF || + dec[v[15]] == 0xFF || + dec[v[16]] == 0xFF || + dec[v[17]] == 0xFF || + dec[v[18]] == 0xFF || + dec[v[19]] == 0xFF || + dec[v[20]] == 0xFF || + dec[v[21]] == 0xFF || + dec[v[22]] == 0xFF || + dec[v[23]] == 0xFF || + dec[v[24]] == 0xFF || + dec[v[25]] == 0xFF) { + return ErrInvalidCharacters + } + + // Check if the first character in a base32 encoded ULID will overflow. This + // happens because the base32 representation encodes 130 bits, while the + // ULID is only 128 bits. + // + // See https://github.com/oklog/ulid/issues/9 for details. + if v[0] > '7' { + return ErrOverflow + } + + // Use an optimized unrolled loop (from https://github.com/RobThree/NUlid) + // to decode a base32 ULID. + + // 6 bytes timestamp (48 bits) + (*id)[0] = ((dec[v[0]] << 5) | dec[v[1]]) + (*id)[1] = ((dec[v[2]] << 3) | (dec[v[3]] >> 2)) + (*id)[2] = ((dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4)) + (*id)[3] = ((dec[v[5]] << 4) | (dec[v[6]] >> 1)) + (*id)[4] = ((dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3)) + (*id)[5] = ((dec[v[8]] << 5) | dec[v[9]]) + + // 10 bytes of entropy (80 bits) + (*id)[6] = ((dec[v[10]] << 3) | (dec[v[11]] >> 2)) + (*id)[7] = ((dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4)) + (*id)[8] = ((dec[v[13]] << 4) | (dec[v[14]] >> 1)) + (*id)[9] = ((dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3)) + (*id)[10] = ((dec[v[16]] << 5) | dec[v[17]]) + (*id)[11] = ((dec[v[18]] << 3) | dec[v[19]]>>2) + (*id)[12] = ((dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4)) + (*id)[13] = ((dec[v[21]] << 4) | (dec[v[22]] >> 1)) + (*id)[14] = ((dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3)) + (*id)[15] = ((dec[v[24]] << 5) | dec[v[25]]) + + return nil +} + +// MustParse is a convenience function equivalent to Parse that panics on failure +// instead of returning an error. +func MustParse(ulid string) ULID { + id, err := Parse(ulid) + if err != nil { + panic(err) + } + return id +} + +// MustParseStrict is a convenience function equivalent to ParseStrict that +// panics on failure instead of returning an error. +func MustParseStrict(ulid string) ULID { + id, err := ParseStrict(ulid) + if err != nil { + panic(err) + } + return id +} + +// String returns a lexicographically sortable string encoded ULID +// (26 characters, non-standard base 32) e.g. 
01AN4Z07BY79KA1307SR9X4MV3 +// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy +func (id ULID) String() string { + ulid := make([]byte, EncodedSize) + _ = id.MarshalTextTo(ulid) + return string(ulid) +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface by +// returning the ULID as a byte slice. +func (id ULID) MarshalBinary() ([]byte, error) { + ulid := make([]byte, len(id)) + return ulid, id.MarshalBinaryTo(ulid) +} + +// MarshalBinaryTo writes the binary encoding of the ULID to the given buffer. +// ErrBufferSize is returned when the len(dst) != 16. +func (id ULID) MarshalBinaryTo(dst []byte) error { + if len(dst) != len(id) { + return ErrBufferSize + } + + copy(dst, id[:]) + return nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by +// copying the passed data and converting it to an ULID. ErrDataSize is +// returned if the data length is different from ULID length. +func (id *ULID) UnmarshalBinary(data []byte) error { + if len(data) != len(*id) { + return ErrDataSize + } + + copy((*id)[:], data) + return nil +} + +// Encoding is the base 32 encoding alphabet used in ULID strings. +const Encoding = "0123456789ABCDEFGHJKMNPQRSTVWXYZ" + +// MarshalText implements the encoding.TextMarshaler interface by +// returning the string encoded ULID. +func (id ULID) MarshalText() ([]byte, error) { + ulid := make([]byte, EncodedSize) + return ulid, id.MarshalTextTo(ulid) +} + +// MarshalTextTo writes the ULID as a string to the given buffer. +// ErrBufferSize is returned when the len(dst) != 26. +func (id ULID) MarshalTextTo(dst []byte) error { + // Optimized unrolled loop ahead. + // From https://github.com/RobThree/NUlid + + if len(dst) != EncodedSize { + return ErrBufferSize + } + + // 10 byte timestamp + dst[0] = Encoding[(id[0]&224)>>5] + dst[1] = Encoding[id[0]&31] + dst[2] = Encoding[(id[1]&248)>>3] + dst[3] = Encoding[((id[1]&7)<<2)|((id[2]&192)>>6)] + dst[4] = Encoding[(id[2]&62)>>1] + dst[5] = Encoding[((id[2]&1)<<4)|((id[3]&240)>>4)] + dst[6] = Encoding[((id[3]&15)<<1)|((id[4]&128)>>7)] + dst[7] = Encoding[(id[4]&124)>>2] + dst[8] = Encoding[((id[4]&3)<<3)|((id[5]&224)>>5)] + dst[9] = Encoding[id[5]&31] + + // 16 bytes of entropy + dst[10] = Encoding[(id[6]&248)>>3] + dst[11] = Encoding[((id[6]&7)<<2)|((id[7]&192)>>6)] + dst[12] = Encoding[(id[7]&62)>>1] + dst[13] = Encoding[((id[7]&1)<<4)|((id[8]&240)>>4)] + dst[14] = Encoding[((id[8]&15)<<1)|((id[9]&128)>>7)] + dst[15] = Encoding[(id[9]&124)>>2] + dst[16] = Encoding[((id[9]&3)<<3)|((id[10]&224)>>5)] + dst[17] = Encoding[id[10]&31] + dst[18] = Encoding[(id[11]&248)>>3] + dst[19] = Encoding[((id[11]&7)<<2)|((id[12]&192)>>6)] + dst[20] = Encoding[(id[12]&62)>>1] + dst[21] = Encoding[((id[12]&1)<<4)|((id[13]&240)>>4)] + dst[22] = Encoding[((id[13]&15)<<1)|((id[14]&128)>>7)] + dst[23] = Encoding[(id[14]&124)>>2] + dst[24] = Encoding[((id[14]&3)<<3)|((id[15]&224)>>5)] + dst[25] = Encoding[id[15]&31] + + return nil +} + +// Byte to index table for O(1) lookups when unmarshaling. +// We use 0xFF as sentinel value for invalid indexes. 
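+// The two identical alphabet runs in the table make decoding
+// case-insensitive: the ASCII ranges 'A'..'Z' and 'a'..'z' map to the same
+// 5-bit values.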
+var dec = [...]byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, + 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, + 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, + 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, + 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, + 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, + 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, +} + +// EncodedSize is the length of a text encoded ULID. +const EncodedSize = 26 + +// UnmarshalText implements the encoding.TextUnmarshaler interface by +// parsing the data as string encoded ULID. +// +// ErrDataSize is returned if the len(v) is different from an encoded +// ULID's length. Invalid encodings produce undefined ULIDs. +func (id *ULID) UnmarshalText(v []byte) error { + return parse(v, false, id) +} + +// Time returns the Unix time in milliseconds encoded in the ULID. +// Use the top level Time function to convert the returned value to +// a time.Time. +func (id ULID) Time() uint64 { + return uint64(id[5]) | uint64(id[4])<<8 | + uint64(id[3])<<16 | uint64(id[2])<<24 | + uint64(id[1])<<32 | uint64(id[0])<<40 +} + +// maxTime is the maximum Unix time in milliseconds that can be +// represented in an ULID. +var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time() + +// MaxTime returns the maximum Unix time in milliseconds that +// can be encoded in an ULID. +func MaxTime() uint64 { return maxTime } + +// Now is a convenience function that returns the current +// UTC time in Unix milliseconds. Equivalent to: +// Timestamp(time.Now().UTC()) +func Now() uint64 { return Timestamp(time.Now().UTC()) } + +// Timestamp converts a time.Time to Unix milliseconds. +// +// Because of the way ULID stores time, times from the year +// 10889 produces undefined results. +func Timestamp(t time.Time) uint64 { + return uint64(t.Unix())*1000 + + uint64(t.Nanosecond()/int(time.Millisecond)) +} + +// Time converts Unix milliseconds in the format +// returned by the Timestamp function to a time.Time. +func Time(ms uint64) time.Time { + s := int64(ms / 1e3) + ns := int64((ms % 1e3) * 1e6) + return time.Unix(s, ns) +} + +// SetTime sets the time component of the ULID to the given Unix time +// in milliseconds. 
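+// ErrBigTime is returned when ms does not fit in the 48-bit time component
+// (see MaxTime).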
+func (id *ULID) SetTime(ms uint64) error { + if ms > maxTime { + return ErrBigTime + } + + (*id)[0] = byte(ms >> 40) + (*id)[1] = byte(ms >> 32) + (*id)[2] = byte(ms >> 24) + (*id)[3] = byte(ms >> 16) + (*id)[4] = byte(ms >> 8) + (*id)[5] = byte(ms) + + return nil +} + +// Entropy returns the entropy from the ULID. +func (id ULID) Entropy() []byte { + e := make([]byte, 10) + copy(e, id[6:]) + return e +} + +// SetEntropy sets the ULID entropy to the passed byte slice. +// ErrDataSize is returned if len(e) != 10. +func (id *ULID) SetEntropy(e []byte) error { + if len(e) != 10 { + return ErrDataSize + } + + copy((*id)[6:], e) + return nil +} + +// Compare returns an integer comparing id and other lexicographically. +// The result will be 0 if id==other, -1 if id < other, and +1 if id > other. +func (id ULID) Compare(other ULID) int { + return bytes.Compare(id[:], other[:]) +} + +// Scan implements the sql.Scanner interface. It supports scanning +// a string or byte slice. +func (id *ULID) Scan(src interface{}) error { + switch x := src.(type) { + case nil: + return nil + case string: + return id.UnmarshalText([]byte(x)) + case []byte: + return id.UnmarshalBinary(x) + } + + return ErrScanValue +} + +// Value implements the sql/driver.Valuer interface. This returns the value +// represented as a byte slice. If instead a string is desirable, a wrapper +// type can be created that calls String(). +// +// // stringValuer wraps a ULID as a string-based driver.Valuer. +// type stringValuer ULID +// +// func (id stringValuer) Value() (driver.Value, error) { +// return ULID(id).String(), nil +// } +// +// // Example usage. +// db.Exec("...", stringValuer(id)) +func (id ULID) Value() (driver.Value, error) { + return id.MarshalBinary() +} + +// Monotonic returns an entropy source that is guaranteed to yield +// strictly increasing entropy bytes for the same ULID timestamp. +// On conflicts, the previous ULID entropy is incremented with a +// random number between 1 and `inc` (inclusive). +// +// The provided entropy source must actually yield random bytes or else +// monotonic reads are not guaranteed to terminate, since there isn't +// enough randomness to compute an increment number. +// +// When `inc == 0`, it'll be set to a secure default of `math.MaxUint32`. +// The lower the value of `inc`, the easier the next ULID within the +// same millisecond is to guess. If your code depends on ULIDs having +// secure entropy bytes, then don't go under this default unless you know +// what you're doing. +// +// The returned io.Reader isn't safe for concurrent use. +func Monotonic(entropy io.Reader, inc uint64) io.Reader { + m := monotonic{ + Reader: bufio.NewReader(entropy), + inc: inc, + } + + if m.inc == 0 { + m.inc = math.MaxUint32 + } + + if rng, ok := entropy.(*rand.Rand); ok { + m.rng = rng + } + + return &m +} + +type monotonic struct { + io.Reader + ms uint64 + inc uint64 + entropy uint80 + rand [8]byte + rng *rand.Rand +} + +func (m *monotonic) MonotonicRead(ms uint64, entropy []byte) (err error) { + if !m.entropy.IsZero() && m.ms == ms { + err = m.increment() + m.entropy.AppendTo(entropy) + } else if _, err = io.ReadFull(m.Reader, entropy); err == nil { + m.ms = ms + m.entropy.SetBytes(entropy) + } + return err +} + +// increment the previous entropy number with a random number +// of up to m.inc (inclusive). 
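+// ErrMonotonicOverflow is returned when adding the increment would wrap the
+// 80-bit entropy value.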
+func (m *monotonic) increment() error { + if inc, err := m.random(); err != nil { + return err + } else if m.entropy.Add(inc) { + return ErrMonotonicOverflow + } + return nil +} + +// random returns a uniform random value in [1, m.inc), reading entropy +// from m.Reader. When m.inc == 0 || m.inc == 1, it returns 1. +// Adapted from: https://golang.org/pkg/crypto/rand/#Int +func (m *monotonic) random() (inc uint64, err error) { + if m.inc <= 1 { + return 1, nil + } + + // Fast path for using a underlying rand.Rand directly. + if m.rng != nil { + // Range: [1, m.inc) + return 1 + uint64(m.rng.Int63n(int64(m.inc))), nil + } + + // bitLen is the maximum bit length needed to encode a value < m.inc. + bitLen := bits.Len64(m.inc) + + // byteLen is the maximum byte length needed to encode a value < m.inc. + byteLen := uint(bitLen+7) / 8 + + // msbitLen is the number of bits in the most significant byte of m.inc-1. + msbitLen := uint(bitLen % 8) + if msbitLen == 0 { + msbitLen = 8 + } + + for inc == 0 || inc >= m.inc { + if _, err = io.ReadFull(m.Reader, m.rand[:byteLen]); err != nil { + return 0, err + } + + // Clear bits in the first byte to increase the probability + // that the candidate is < m.inc. + m.rand[0] &= uint8(int(1< len(ev.providers) { + return nil, errors.New("invalid threshold") + } + + if len(usedKeyids) < ev.threshold { + return acceptedKeys, fmt.Errorf("accepted signatures do not match threshold, Found: %d, Expected %d", len(acceptedKeys), ev.threshold) + } + + return acceptedKeys, nil +} + +func NewEnvelopeVerifier(v ...Verifier) (*EnvelopeVerifier, error) { + return NewMultiEnvelopeVerifier(1, v...) +} + +func NewMultiEnvelopeVerifier(threshold int, p ...Verifier) (*EnvelopeVerifier, error) { + if threshold <= 0 || threshold > len(p) { + return nil, errors.New("invalid threshold") + } + + ev := EnvelopeVerifier{ + providers: p, + threshold: threshold, + } + + return &ev, nil +} + +func SHA256KeyID(pub crypto.PublicKey) (string, error) { + // Generate public key fingerprint + sshpk, err := ssh.NewPublicKey(pub) + if err != nil { + return "", err + } + fingerprint := ssh.FingerprintSHA256(sshpk) + return fingerprint, nil +} + +func removeIndex(v []Verifier, index int) []Verifier { + return append(v[:index], v[index+1:]...) +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go new file mode 100644 index 00000000000..691091af99a --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go @@ -0,0 +1,119 @@ +package signerverifier + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/sha256" + "crypto/sha512" + "fmt" + "os" +) + +const ( + ECDSAKeyType = "ecdsa" + ECDSAKeyScheme = "ecdsa-sha2-nistp256" +) + +// ECDSASignerVerifier is a dsse.SignerVerifier compliant interface to sign and +// verify signatures using ECDSA keys. +type ECDSASignerVerifier struct { + keyID string + curveSize int + private *ecdsa.PrivateKey + public *ecdsa.PublicKey +} + +// NewECDSASignerVerifierFromSSLibKey creates an ECDSASignerVerifier from an +// SSLibKey. 
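+// The key's public portion is required; the private portion is optional and
+// only needed for Sign.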
+func NewECDSASignerVerifierFromSSLibKey(key *SSLibKey) (*ECDSASignerVerifier, error) { + if len(key.KeyVal.Public) == 0 { + return nil, ErrInvalidKey + } + + _, publicParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Public)) + if err != nil { + return nil, fmt.Errorf("unable to create ECDSA signerverifier: %w", err) + } + + sv := &ECDSASignerVerifier{ + keyID: key.KeyID, + curveSize: publicParsedKey.(*ecdsa.PublicKey).Params().BitSize, + public: publicParsedKey.(*ecdsa.PublicKey), + private: nil, + } + + if len(key.KeyVal.Private) > 0 { + _, privateParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Private)) + if err != nil { + return nil, fmt.Errorf("unable to create ECDSA signerverifier: %w", err) + } + + sv.private = privateParsedKey.(*ecdsa.PrivateKey) + } + + return sv, nil +} + +// Sign creates a signature for `data`. +func (sv *ECDSASignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { + if sv.private == nil { + return nil, ErrNotPrivateKey + } + + hashedData := getECDSAHashedData(data, sv.curveSize) + + return ecdsa.SignASN1(rand.Reader, sv.private, hashedData) +} + +// Verify verifies the `sig` value passed in against `data`. +func (sv *ECDSASignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { + hashedData := getECDSAHashedData(data, sv.curveSize) + + if ok := ecdsa.VerifyASN1(sv.public, hashedData, sig); !ok { + return ErrSignatureVerificationFailed + } + + return nil +} + +// KeyID returns the identifier of the key used to create the +// ECDSASignerVerifier instance. +func (sv *ECDSASignerVerifier) KeyID() (string, error) { + return sv.keyID, nil +} + +// Public returns the public portion of the key used to create the +// ECDSASignerVerifier instance. +func (sv *ECDSASignerVerifier) Public() crypto.PublicKey { + return sv.public +} + +// LoadECDSAKeyFromFile returns an SSLibKey instance for an ECDSA key stored in +// a file in the custom securesystemslib format. +// +// Deprecated: use LoadKey(). The custom serialization format has been +// deprecated. Use +// https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py +// to convert your key. +func LoadECDSAKeyFromFile(path string) (*SSLibKey, error) { + contents, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("unable to load ECDSA key from file: %w", err) + } + + return LoadKeyFromSSLibBytes(contents) +} + +func getECDSAHashedData(data []byte, curveSize int) []byte { + switch { + case curveSize <= 256: + return hashBeforeSigning(data, sha256.New()) + case 256 < curveSize && curveSize <= 384: + return hashBeforeSigning(data, sha512.New384()) + case curveSize > 384: + return hashBeforeSigning(data, sha512.New()) + } + return []byte{} +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go new file mode 100644 index 00000000000..d954e14b749 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go @@ -0,0 +1,103 @@ +package signerverifier + +import ( + "context" + "crypto" + "crypto/ed25519" + "encoding/hex" + "fmt" + "os" +) + +const ED25519KeyType = "ed25519" + +// ED25519SignerVerifier is a dsse.SignerVerifier compliant interface to sign +// and verify signatures using ED25519 keys. 
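Putting the constructor, `Sign`, and `Verify` together: a minimal round-trip sketch, assuming `pemBytes` holds a PEM-encoded ECDSA private key and using the `LoadKey` helper that appears later in this package:

```go
package example

import (
	"context"

	"github.com/secure-systems-lab/go-securesystemslib/signerverifier"
)

func signAndVerify(pemBytes, payload []byte) error {
	key, err := signerverifier.LoadKey(pemBytes) // defined in signerverifier.go below
	if err != nil {
		return err
	}
	sv, err := signerverifier.NewECDSASignerVerifierFromSSLibKey(key)
	if err != nil {
		return err
	}
	// For a P-256 key, getECDSAHashedData selects SHA-256 before the ASN.1
	// DER signature is produced; Sign fails with ErrNotPrivateKey when only
	// the public portion was loaded.
	sig, err := sv.Sign(context.Background(), payload)
	if err != nil {
		return err
	}
	return sv.Verify(context.Background(), payload, sig)
}
```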
+type ED25519SignerVerifier struct { + keyID string + private ed25519.PrivateKey + public ed25519.PublicKey +} + +// NewED25519SignerVerifierFromSSLibKey creates an Ed25519SignerVerifier from an +// SSLibKey. +func NewED25519SignerVerifierFromSSLibKey(key *SSLibKey) (*ED25519SignerVerifier, error) { + if len(key.KeyVal.Public) == 0 { + return nil, ErrInvalidKey + } + + public, err := hex.DecodeString(key.KeyVal.Public) + if err != nil { + return nil, fmt.Errorf("unable to create ED25519 signerverifier: %w", err) + } + + var private []byte + if len(key.KeyVal.Private) > 0 { + private, err = hex.DecodeString(key.KeyVal.Private) + if err != nil { + return nil, fmt.Errorf("unable to create ED25519 signerverifier: %w", err) + } + + // python-securesystemslib provides an interface to generate ed25519 + // keys but it differs slightly in how it serializes the key to disk. + // Specifically, the keyval.private field includes _only_ the private + // portion of the key while libraries such as crypto/ed25519 also expect + // the public portion. So, if the private portion is half of what we + // expect, we append the public portion as well. + if len(private) == ed25519.PrivateKeySize/2 { + private = append(private, public...) + } + } + + return &ED25519SignerVerifier{ + keyID: key.KeyID, + public: ed25519.PublicKey(public), + private: ed25519.PrivateKey(private), + }, nil +} + +// Sign creates a signature for `data`. +func (sv *ED25519SignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { + if len(sv.private) == 0 { + return nil, ErrNotPrivateKey + } + + signature := ed25519.Sign(sv.private, data) + return signature, nil +} + +// Verify verifies the `sig` value passed in against `data`. +func (sv *ED25519SignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { + if ok := ed25519.Verify(sv.public, data, sig); ok { + return nil + } + return ErrSignatureVerificationFailed +} + +// KeyID returns the identifier of the key used to create the +// ED25519SignerVerifier instance. +func (sv *ED25519SignerVerifier) KeyID() (string, error) { + return sv.keyID, nil +} + +// Public returns the public portion of the key used to create the +// ED25519SignerVerifier instance. +func (sv *ED25519SignerVerifier) Public() crypto.PublicKey { + return sv.public +} + +// LoadED25519KeyFromFile returns an SSLibKey instance for an ED25519 key stored +// in a file in the custom securesystemslib format. +// +// Deprecated: use LoadKey(). The custom serialization format has been +// deprecated. Use +// https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py +// to convert your key. 
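The seed-padding branch above relies on crypto/ed25519's key layout. A self-contained sketch of why appending the public half reconstructs a full key:

```go
package main

import (
	"bytes"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	pub, priv, _ := ed25519.GenerateKey(rand.Reader)

	// crypto/ed25519 stores a private key as seed || public key, so the
	// 32-byte value python-securesystemslib serializes is just the seed.
	seed := priv.Seed() // len(seed) == ed25519.SeedSize == ed25519.PrivateKeySize/2

	// Appending the public portion, as the constructor above does,
	// rebuilds the full 64-byte private key.
	rebuilt := ed25519.PrivateKey(append(append([]byte{}, seed...), pub...))
	fmt.Println(bytes.Equal(priv, rebuilt)) // true
}
```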
+func LoadED25519KeyFromFile(path string) (*SSLibKey, error) { + contents, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("unable to load ED25519 key from file: %w", err) + } + + return LoadKeyFromSSLibBytes(contents) +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go new file mode 100644 index 00000000000..2abfcb27c4b --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go @@ -0,0 +1,170 @@ +package signerverifier + +import ( + "context" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "fmt" + "os" + "strings" +) + +const ( + RSAKeyType = "rsa" + RSAKeyScheme = "rsassa-pss-sha256" + RSAPrivateKeyPEM = "RSA PRIVATE KEY" +) + +// RSAPSSSignerVerifier is a dsse.SignerVerifier compliant interface to sign and +// verify signatures using RSA keys following the RSA-PSS scheme. +type RSAPSSSignerVerifier struct { + keyID string + private *rsa.PrivateKey + public *rsa.PublicKey +} + +// NewRSAPSSSignerVerifierFromSSLibKey creates an RSAPSSSignerVerifier from an +// SSLibKey. +func NewRSAPSSSignerVerifierFromSSLibKey(key *SSLibKey) (*RSAPSSSignerVerifier, error) { + if len(key.KeyVal.Public) == 0 { + return nil, ErrInvalidKey + } + + _, publicParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Public)) + if err != nil { + return nil, fmt.Errorf("unable to create RSA-PSS signerverifier: %w", err) + } + + if len(key.KeyVal.Private) > 0 { + _, privateParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Private)) + if err != nil { + return nil, fmt.Errorf("unable to create RSA-PSS signerverifier: %w", err) + } + + return &RSAPSSSignerVerifier{ + keyID: key.KeyID, + public: publicParsedKey.(*rsa.PublicKey), + private: privateParsedKey.(*rsa.PrivateKey), + }, nil + } + + return &RSAPSSSignerVerifier{ + keyID: key.KeyID, + public: publicParsedKey.(*rsa.PublicKey), + private: nil, + }, nil +} + +// Sign creates a signature for `data`. +func (sv *RSAPSSSignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { + if sv.private == nil { + return nil, ErrNotPrivateKey + } + + hashedData := hashBeforeSigning(data, sha256.New()) + + return rsa.SignPSS(rand.Reader, sv.private, crypto.SHA256, hashedData, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}) +} + +// Verify verifies the `sig` value passed in against `data`. +func (sv *RSAPSSSignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { + hashedData := hashBeforeSigning(data, sha256.New()) + + if err := rsa.VerifyPSS(sv.public, crypto.SHA256, hashedData, sig, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}); err != nil { + return ErrSignatureVerificationFailed + } + + return nil +} + +// KeyID returns the identifier of the key used to create the +// RSAPSSSignerVerifier instance. +func (sv *RSAPSSSignerVerifier) KeyID() (string, error) { + return sv.keyID, nil +} + +// Public returns the public portion of the key used to create the +// RSAPSSSignerVerifier instance. +func (sv *RSAPSSSignerVerifier) Public() crypto.PublicKey { + return sv.public +} + +// LoadRSAPSSKeyFromFile returns an SSLibKey instance for an RSA key stored in a +// file. +// +// Deprecated: use LoadKey(). The custom serialization format has been +// deprecated. Use +// https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py +// to convert your key. 
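For reference, the exact RSASSA-PSS parameters pinned by `Sign` and `Verify` above, shown as a standalone crypto/rsa round trip: a SHA-256 digest with the salt length equal to the digest size.

```go
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	digest := sha256.Sum256([]byte("payload"))
	// Same options RSAPSSSignerVerifier passes: 32-byte salt, SHA-256.
	opts := &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}

	sig, err := rsa.SignPSS(rand.Reader, priv, crypto.SHA256, digest[:], opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(rsa.VerifyPSS(&priv.PublicKey, crypto.SHA256, digest[:], sig, opts) == nil) // true
}
```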
+func LoadRSAPSSKeyFromFile(path string) (*SSLibKey, error) { + contents, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + + return LoadRSAPSSKeyFromBytes(contents) +} + +// LoadRSAPSSKeyFromBytes is a function that takes a byte array as input. This +// byte array should represent a PEM encoded RSA key, as PEM encoding is +// required. The function returns an SSLibKey instance, which is a struct that +// holds the key data. +// +// Deprecated: use LoadKey() for all key types, RSA is no longer the only key +// that uses PEM serialization. +func LoadRSAPSSKeyFromBytes(contents []byte) (*SSLibKey, error) { + pemData, keyObj, err := decodeAndParsePEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + + key := &SSLibKey{ + KeyType: RSAKeyType, + Scheme: RSAKeyScheme, + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyVal: KeyVal{}, + } + + pubKeyBytes, err := marshalAndGeneratePEM(keyObj) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + key.KeyVal.Public = strings.TrimSpace(string(pubKeyBytes)) + + if _, ok := keyObj.(*rsa.PrivateKey); ok { + key.KeyVal.Private = strings.TrimSpace(string(generatePEMBlock(pemData.Bytes, RSAPrivateKeyPEM))) + } + + if len(key.KeyID) == 0 { + keyID, err := calculateKeyID(key) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + key.KeyID = keyID + } + + return key, nil +} + +func marshalAndGeneratePEM(key interface{}) ([]byte, error) { + var pubKeyBytes []byte + var err error + + switch k := key.(type) { + case *rsa.PublicKey: + pubKeyBytes, err = x509.MarshalPKIXPublicKey(k) + case *rsa.PrivateKey: + pubKeyBytes, err = x509.MarshalPKIXPublicKey(k.Public()) + default: + return nil, fmt.Errorf("unexpected key type: %T", k) + } + + if err != nil { + return nil, err + } + + return generatePEMBlock(pubKeyBytes, PublicKeyPEM), nil +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go new file mode 100644 index 00000000000..3a8259dfda3 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go @@ -0,0 +1,146 @@ +package signerverifier + +import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "encoding/hex" + "errors" + "strings" +) + +var KeyIDHashAlgorithms = []string{"sha256", "sha512"} + +var ( + ErrNotPrivateKey = errors.New("loaded key is not a private key") + ErrSignatureVerificationFailed = errors.New("failed to verify signature") + ErrUnknownKeyType = errors.New("unknown key type") + ErrInvalidThreshold = errors.New("threshold is either less than 1 or greater than number of provided public keys") + ErrInvalidKey = errors.New("key object has no value") + ErrInvalidPEM = errors.New("unable to parse PEM block") +) + +const ( + PublicKeyPEM = "PUBLIC KEY" + PrivateKeyPEM = "PRIVATE KEY" +) + +type SSLibKey struct { + KeyIDHashAlgorithms []string `json:"keyid_hash_algorithms"` + KeyType string `json:"keytype"` + KeyVal KeyVal `json:"keyval"` + Scheme string `json:"scheme"` + KeyID string `json:"keyid"` +} + +type KeyVal struct { + Private string `json:"private,omitempty"` + Public string `json:"public,omitempty"` + Certificate string `json:"certificate,omitempty"` + Identity string `json:"identity,omitempty"` + Issuer string `json:"issuer,omitempty"` 
+} + +// LoadKey returns an SSLibKey object when provided a PEM encoded key. +// Currently, RSA, ED25519, and ECDSA keys are supported. +func LoadKey(keyBytes []byte) (*SSLibKey, error) { + pemBlock, rawKey, err := decodeAndParsePEM(keyBytes) + if err != nil { + return nil, err + } + + var key *SSLibKey + switch k := rawKey.(type) { + case *rsa.PublicKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(k) + if err != nil { + return nil, err + } + key = &SSLibKey{ + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyType: RSAKeyType, + KeyVal: KeyVal{ + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))), + }, + Scheme: RSAKeyScheme, + } + + case *rsa.PrivateKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(k.Public()) + if err != nil { + return nil, err + } + key = &SSLibKey{ + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyType: RSAKeyType, + KeyVal: KeyVal{ + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))), + Private: strings.TrimSpace(string(generatePEMBlock(pemBlock.Bytes, pemBlock.Type))), + }, + Scheme: RSAKeyScheme, + } + + case ed25519.PublicKey: + key = &SSLibKey{ + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyType: ED25519KeyType, + KeyVal: KeyVal{ + Public: strings.TrimSpace(hex.EncodeToString(k)), + }, + Scheme: ED25519KeyType, + } + + case ed25519.PrivateKey: + pubKeyBytes := k.Public() + key = &SSLibKey{ + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyType: ED25519KeyType, + KeyVal: KeyVal{ + Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes.(ed25519.PublicKey))), + Private: strings.TrimSpace(hex.EncodeToString(k)), + }, + Scheme: ED25519KeyType, + } + + case *ecdsa.PublicKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(k) + if err != nil { + return nil, err + } + key = &SSLibKey{ + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyType: ECDSAKeyType, + KeyVal: KeyVal{ + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))), + }, + Scheme: ECDSAKeyScheme, + } + + case *ecdsa.PrivateKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(k.Public()) + if err != nil { + return nil, err + } + key = &SSLibKey{ + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyType: ECDSAKeyType, + KeyVal: KeyVal{ + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))), + Private: strings.TrimSpace(string(generatePEMBlock(pemBlock.Bytes, PrivateKeyPEM))), + }, + Scheme: ECDSAKeyScheme, + } + + default: + return nil, ErrUnknownKeyType + } + + keyID, err := calculateKeyID(key) + if err != nil { + return nil, err + } + key.KeyID = keyID + + return key, nil +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go new file mode 100644 index 00000000000..cc0188be3e9 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go @@ -0,0 +1,142 @@ +package signerverifier + +import ( + "crypto/sha256" + "crypto/x509" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "hash" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" +) + +/* +Credits: Parts of this file were originally authored for in-toto-golang. 
+*/
+
+var (
+	// ErrNoPEMBlock gets triggered when there is no PEM block in the provided file
+	ErrNoPEMBlock = errors.New("failed to decode the data as PEM block (are you sure this is a pem file?)")
+	// ErrFailedPEMParsing gets returned when PKCS1, PKCS8, PKIX, or EC key parsing fails
+	ErrFailedPEMParsing = errors.New("failed parsing the PEM block: unsupported PEM type")
+)
+
+// LoadKeyFromSSLibBytes returns a pointer to a Key instance created from the
+// contents of the bytes. The key contents are expected to be in the custom
+// securesystemslib format.
+//
+// Deprecated: use LoadKey() for all key types, RSA is no longer the only key
+// that uses PEM serialization.
+func LoadKeyFromSSLibBytes(contents []byte) (*SSLibKey, error) {
+	var key *SSLibKey
+	if err := json.Unmarshal(contents, &key); err != nil {
+		return LoadRSAPSSKeyFromBytes(contents)
+	}
+	if len(key.KeyID) == 0 {
+		keyID, err := calculateKeyID(key)
+		if err != nil {
+			return nil, err
+		}
+		key.KeyID = keyID
+	}
+
+	return key, nil
+}
+
+func calculateKeyID(k *SSLibKey) (string, error) {
+	key := map[string]any{
+		"keytype":               k.KeyType,
+		"scheme":                k.Scheme,
+		"keyid_hash_algorithms": k.KeyIDHashAlgorithms,
+		"keyval": map[string]string{
+			"public": k.KeyVal.Public,
+		},
+	}
+	canonical, err := cjson.EncodeCanonical(key)
+	if err != nil {
+		return "", err
+	}
+	digest := sha256.Sum256(canonical)
+	return hex.EncodeToString(digest[:]), nil
+}
+
+/*
+generatePEMBlock creates a PEM block from scratch via the keyBytes and the pemType.
+If successful it returns a PEM block as a []byte slice. This function should always
+succeed; if keyBytes is empty, the PEM block will have an empty byte block, so
+only the header and footer will exist.
+*/
+func generatePEMBlock(keyBytes []byte, pemType string) []byte {
+	// construct PEM block
+	pemBlock := &pem.Block{
+		Type:    pemType,
+		Headers: nil,
+		Bytes:   keyBytes,
+	}
+	return pem.EncodeToMemory(pemBlock)
+}
+
+/*
+decodeAndParsePEM receives potential PEM bytes, decodes them via pem.Decode,
+and passes them to parsePEMKey. If any error occurs during this process,
+the function will return nil and an error (either ErrFailedPEMParsing
+or ErrNoPEMBlock). On success it will return the decoded pemData, the
+key object interface, and nil as error. We need the decoded pemData,
+because LoadKey relies on decoded pemData for operating system
+interoperability.
+*/
+func decodeAndParsePEM(pemBytes []byte) (*pem.Block, any, error) {
+	// pem.Decode returns the parsed PEM block and a rest.
+	// The rest is everything that could not be parsed as a PEM block,
+	// so we can drop it via the blank identifier "_".
+	data, _ := pem.Decode(pemBytes)
+	if data == nil {
+		return nil, nil, ErrNoPEMBlock
+	}
+
+	// Try to load a private key; if this fails, try to load
+	// the key as a public key.
+	key, err := parsePEMKey(data.Bytes)
+	if err != nil {
+		return nil, nil, err
+	}
+	return data, key, nil
+}
+
+/*
+parsePEMKey tries to parse a PEM []byte slice using the following standards
+in the given order:
+
+  - PKCS8
+  - PKCS1
+  - PKIX, and finally EC (SEC 1)
+
+On success it returns the parsed key and nil.
+On failure it returns nil and the error ErrFailedPEMParsing +*/ +func parsePEMKey(data []byte) (any, error) { + key, err := x509.ParsePKCS8PrivateKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParsePKCS1PrivateKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParsePKIXPublicKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParseECPrivateKey(data) + if err == nil { + return key, nil + } + return nil, ErrFailedPEMParsing +} + +func hashBeforeSigning(data []byte, h hash.Hash) []byte { + h.Write(data) + return h.Sum(nil) +} diff --git a/vendor/github.com/shibumi/go-pathspec/.gitignore b/vendor/github.com/shibumi/go-pathspec/.gitignore new file mode 100644 index 00000000000..3e32393f123 --- /dev/null +++ b/vendor/github.com/shibumi/go-pathspec/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test + +# ignore .idea +.idea diff --git a/vendor/github.com/shibumi/go-pathspec/GO-LICENSE b/vendor/github.com/shibumi/go-pathspec/GO-LICENSE new file mode 100644 index 00000000000..74487567632 --- /dev/null +++ b/vendor/github.com/shibumi/go-pathspec/GO-LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/shibumi/go-pathspec/LICENSE b/vendor/github.com/shibumi/go-pathspec/LICENSE new file mode 100644 index 00000000000..5c304d1a4a7 --- /dev/null +++ b/vendor/github.com/shibumi/go-pathspec/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/shibumi/go-pathspec/README.md b/vendor/github.com/shibumi/go-pathspec/README.md
new file mode 100644
index 00000000000..c146cf69b01
--- /dev/null
+++ b/vendor/github.com/shibumi/go-pathspec/README.md
@@ -0,0 +1,45 @@
+# go-pathspec
+
+[![build](https://github.com/shibumi/go-pathspec/workflows/build/badge.svg)](https://github.com/shibumi/go-pathspec/actions?query=workflow%3Abuild) [![Coverage Status](https://coveralls.io/repos/github/shibumi/go-pathspec/badge.svg)](https://coveralls.io/github/shibumi/go-pathspec) [![PkgGoDev](https://pkg.go.dev/badge/github.com/shibumi/go-pathspec)](https://pkg.go.dev/github.com/shibumi/go-pathspec)
+
+go-pathspec implements gitignore-style pattern matching for paths.
+
+## Alternatives
+
+There are a few alternatives that try to be gitignore compatible, or that even
+claim gitignore compatibility:
+
+### https://github.com/go-git/go-git
+
+go-git claims to be gitignore compatible, but it actually misses a few
+special cases. This issue describes one of the broken patterns: https://github.com/go-git/go-git/issues/108
+
+What does not work is global filename pattern matching. Consider the following
+`.gitignore` file:
+
+```gitignore
+# gitignore test file
+parse.go
+```
+
+`parse.go` should then match every file named `parse.go`. You can test this via
+this shell script:
+```shell
+mkdir -p /tmp/test/internal/util
+touch /tmp/test/internal/util/parse.go
+cd /tmp/test/
+git init
+echo "parse.go" > .gitignore
+```
+
+With git, `parse.go` will be excluded. The go-git implementation behaves differently.
+
+### https://github.com/monochromegane/go-gitignore
+
+monochromegane's go-gitignore does not support the `**` operator.
+This is not consistent with real gitignore behavior either.
+
+## Authors
+
+Sander van Harmelen
+Christian Rebischke
diff --git a/vendor/github.com/shibumi/go-pathspec/gitignore.go b/vendor/github.com/shibumi/go-pathspec/gitignore.go
new file mode 100644
index 00000000000..2b08d4e8a57
--- /dev/null
+++ b/vendor/github.com/shibumi/go-pathspec/gitignore.go
@@ -0,0 +1,299 @@
+//
+// Copyright 2014, Sander van Harmelen
+// Copyright 2020, Christian Rebischke
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package pathspec implements git compatible gitignore pattern matching.
+// See the description below, if you are unfamiliar with it:
+//
+// A blank line matches no files, so it can serve as a separator for readability.
+//
+// A line starting with # serves as a comment. Put a backslash ("\") in front of
+// the first hash for patterns that begin with a hash.
+//
+// An optional prefix "!" which negates the pattern; any matching file excluded
+// by a previous pattern will become included again. If a negated pattern matches,
+// this will override lower precedence patterns sources. Put a backslash ("\") in
+// front of the first "!" for patterns that begin with a literal "!", for example,
+// "\!important!.txt".
+//
+// If the pattern ends with a slash, it is removed for the purpose of the following
+// description, but it would only find a match with a directory. In other words,
+// foo/ will match a directory foo and paths underneath it, but will not match a
+// regular file or a symbolic link foo (this is consistent with the way pathspec
+// works in general in Git).
+//
+// If the pattern does not contain a slash /, Git treats it as a shell glob pattern
+// and checks for a match against the pathname relative to the location of the
+// .gitignore file (relative to the toplevel of the work tree if not from a
+// .gitignore file).
+//
+// Otherwise, Git treats the pattern as a shell glob suitable for consumption by
+// fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will not match
+// a / in the pathname. For example, "Documentation/*.html" matches
+// "Documentation/git.html" but not "Documentation/ppc/ppc.html" or
+// "tools/perf/Documentation/perf.html".
+//
+// A leading slash matches the beginning of the pathname. For example, "/*.c"
+// matches "cat-file.c" but not "mozilla-sha1/sha1.c".
+//
+// Two consecutive asterisks ("**") in patterns matched against full pathname
+// may have special meaning:
+//
+// A leading "**" followed by a slash means match in all directories. For example,
+// "**/foo" matches file or directory "foo" anywhere, the same as pattern "foo".
+// "**/foo/bar" matches file or directory "bar" anywhere that is directly under
+// directory "foo".
+//
+// A trailing "/" matches everything inside. For example, "abc/" matches all files
+// inside directory "abc", relative to the location of the .gitignore file, with
+// infinite depth.
+//
+// A slash followed by two consecutive asterisks then a slash matches zero or more
+// directories. For example, "a/**/b" matches "a/b", "a/x/b", "a/x/y/b" and so on.
+//
+// Other consecutive asterisks are considered invalid.
+package pathspec
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"path/filepath"
+	"regexp"
+	"strings"
+)
+
+type gitIgnorePattern struct {
+	Regex   string
+	Include bool
+}
+
+// GitIgnore uses a string slice of patterns for matching on a filepath string.
+// On match it returns true, otherwise false. On error it passes the error through.
+func GitIgnore(patterns []string, name string) (ignore bool, err error) {
+	for _, pattern := range patterns {
+		p := parsePattern(pattern)
+		// Convert Windows paths to Unix paths
+		name = filepath.ToSlash(name)
+		match, err := regexp.MatchString(p.Regex, name)
+		if err != nil {
+			return ignore, err
+		}
+		if match {
+			if p.Include {
+				return false, nil
+			}
+			ignore = true
+		}
+	}
+	return ignore, nil
+}
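A short usage sketch of GitIgnore, showing how a slash-free pattern matches at any depth and how a later negation re-includes a file:

```go
package main

import (
	"fmt"

	pathspec "github.com/shibumi/go-pathspec"
)

func main() {
	patterns := []string{"*.log", "!important.log"}

	// "*.log" contains no slash, so it is normalized to "**/*.log" and
	// matches at any depth.
	ignore, _ := pathspec.GitIgnore(patterns, "logs/debug.log")
	fmt.Println(ignore) // true

	// "!important.log" matches and is a negation, so the file is kept.
	ignore, _ = pathspec.GitIgnore(patterns, "important.log")
	fmt.Println(ignore) // false
}
```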
+// ReadGitIgnore reads a gitignore file line by line from an io.Reader.
+// It behaves exactly like the GitIgnore function; the only difference
+// is that GitIgnore works on a string slice.
+//
+// ReadGitIgnore returns a boolean value indicating whether we match, and an error.
+func ReadGitIgnore(content io.Reader, name string) (ignore bool, err error) {
+	scanner := bufio.NewScanner(content)
+
+	for scanner.Scan() {
+		pattern := strings.TrimSpace(scanner.Text())
+		if len(pattern) == 0 || pattern[0] == '#' {
+			continue
+		}
+		p := parsePattern(pattern)
+		// Convert Windows paths to Unix paths
+		name = filepath.ToSlash(name)
+		match, err := regexp.MatchString(p.Regex, name)
+		if err != nil {
+			return ignore, err
+		}
+		if match {
+			if p.Include {
+				return false, scanner.Err()
+			}
+			ignore = true
+		}
+	}
+	return ignore, scanner.Err()
+}
+
+func parsePattern(pattern string) *gitIgnorePattern {
+	p := &gitIgnorePattern{}
+
+	// An optional prefix "!" which negates the pattern; any matching file
+	// excluded by a previous pattern will become included again.
+	if strings.HasPrefix(pattern, "!") {
+		pattern = pattern[1:]
+		p.Include = true
+	} else {
+		p.Include = false
+	}
+
+	// Remove leading back-slash escape for escaped hash ('#') or
+	// exclamation mark ('!').
+	if strings.HasPrefix(pattern, "\\") {
+		pattern = pattern[1:]
+	}
+
+	// Split pattern into segments.
+	patternSegs := strings.Split(pattern, "/")
+
+	// A pattern beginning with a slash ('/') will only match paths
+	// directly on the root directory instead of any descendant paths.
+	// So remove empty first segment to make pattern absolute to root.
+	// A pattern without a beginning slash ('/') will match any
+	// descendant path. This is equivalent to "**/{pattern}". So
+	// prepend with double-asterisks to make pattern relative to
+	// root.
+	if patternSegs[0] == "" {
+		patternSegs = patternSegs[1:]
+	} else if patternSegs[0] != "**" {
+		patternSegs = append([]string{"**"}, patternSegs...)
+	}
+
+	// A pattern ending with a slash ('/') will match all descendant
+	// paths if it is a directory, but not if it is a regular file.
+	// This is equivalent to "{pattern}/**". So, set last segment to
+	// double asterisks to include all descendants.
+	if patternSegs[len(patternSegs)-1] == "" {
+		patternSegs[len(patternSegs)-1] = "**"
+	}
+
+	// Build regular expression from pattern.
+	var expr bytes.Buffer
+	expr.WriteString("^")
+	needSlash := false
+
+	for i, seg := range patternSegs {
+		switch seg {
+		case "**":
+			switch {
+			case i == 0 && i == len(patternSegs)-1:
+				// A pattern consisting solely of double-asterisks ('**')
+				// will match every path.
+				expr.WriteString(".+")
+			case i == 0:
+				// A normalized pattern beginning with double-asterisks
+				// ('**') will match any leading path segments.
+				expr.WriteString("(?:.+/)?")
+				needSlash = false
+			case i == len(patternSegs)-1:
+				// A normalized pattern ending with double-asterisks ('**')
+				// will match any trailing path segments.
+				expr.WriteString("/.+")
+			default:
+				// A pattern with inner double-asterisks ('**') will match
+				// multiple (or zero) inner path segments.
+				expr.WriteString("(?:/.+)?")
+				needSlash = true
+			}
+		case "*":
+			// Match single path segment.
+			if needSlash {
+				expr.WriteString("/")
+			}
+			expr.WriteString("[^/]+")
+			needSlash = true
+		default:
+			// Match segment glob pattern.
+			if needSlash {
+				expr.WriteString("/")
+			}
+			expr.WriteString(translateGlob(seg))
+			needSlash = true
+		}
+	}
+	expr.WriteString("$")
+	p.Regex = expr.String()
+	return p
+}
+
+// NOTE: This is derived from `fnmatch.translate()` and is similar to
+// the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.
+func translateGlob(glob string) string { + var regex bytes.Buffer + escape := false + + for i := 0; i < len(glob); i++ { + char := glob[i] + // Escape the character. + switch { + case escape: + escape = false + regex.WriteString(regexp.QuoteMeta(string(char))) + case char == '\\': + // Escape character, escape next character. + escape = true + case char == '*': + // Multi-character wildcard. Match any string (except slashes), + // including an empty string. + regex.WriteString("[^/]*") + case char == '?': + // Single-character wildcard. Match any single character (except + // a slash). + regex.WriteString("[^/]") + case char == '[': + regex.WriteString(translateBracketExpression(&i, glob)) + default: + // Regular character, escape it for regex. + regex.WriteString(regexp.QuoteMeta(string(char))) + } + } + return regex.String() +} + +// Bracket expression wildcard. Except for the beginning +// exclamation mark, the whole bracket expression can be used +// directly as regex but we have to find where the expression +// ends. +// - "[][!]" matches ']', '[' and '!'. +// - "[]-]" matches ']' and '-'. +// - "[!]a-]" matches any character except ']', 'a' and '-'. +func translateBracketExpression(i *int, glob string) string { + regex := string(glob[*i]) + *i++ + j := *i + + // Pass bracket expression negation. + if j < len(glob) && glob[j] == '!' { + j++ + } + // Pass first closing bracket if it is at the beginning of the + // expression. + if j < len(glob) && glob[j] == ']' { + j++ + } + // Find closing bracket. Stop once we reach the end or find it. + for j < len(glob) && glob[j] != ']' { + j++ + } + + if j < len(glob) { + if glob[*i] == '!' { + regex = regex + "^" + *i++ + } + regex = regexp.QuoteMeta(glob[*i:j]) + *i = j + } else { + // Failed to find closing bracket, treat opening bracket as a + // bracket literal instead of as an expression. + regex = regexp.QuoteMeta(string(glob[*i])) + } + return "[" + regex + "]" +} diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go new file mode 100644 index 00000000000..9d43f66c0f2 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go @@ -0,0 +1,543 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: sigstore_bundle.proto + +package v1 + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + dsse "github.com/sigstore/protobuf-specs/gen/pb-go/dsse" + v11 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Various timestamped counter signatures over the artifacts signature. +// Currently only RFC3161 signatures are provided. More formats may be added +// in the future. +type TimestampVerificationData struct { + state protoimpl.MessageState `protogen:"open.v1"` + // A list of RFC3161 signed timestamps provided by the user. + // This can be used when the entry has not been stored on a + // transparency log, or in conjunction for a stronger trust model. + // Clients MUST verify the hashed message in the message imprint + // against the signature in the bundle. + Rfc3161Timestamps []*v1.RFC3161SignedTimestamp `protobuf:"bytes,1,rep,name=rfc3161_timestamps,json=rfc3161Timestamps,proto3" json:"rfc3161_timestamps,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TimestampVerificationData) Reset() { + *x = TimestampVerificationData{} + mi := &file_sigstore_bundle_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TimestampVerificationData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimestampVerificationData) ProtoMessage() {} + +func (x *TimestampVerificationData) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_bundle_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimestampVerificationData.ProtoReflect.Descriptor instead. +func (*TimestampVerificationData) Descriptor() ([]byte, []int) { + return file_sigstore_bundle_proto_rawDescGZIP(), []int{0} +} + +func (x *TimestampVerificationData) GetRfc3161Timestamps() []*v1.RFC3161SignedTimestamp { + if x != nil { + return x.Rfc3161Timestamps + } + return nil +} + +// VerificationMaterial captures details on the materials used to verify +// signatures. This message may be embedded in a DSSE envelope as a signature +// extension. Specifically, the `ext` field of the extension will expect this +// message when the signature extension is for Sigstore. This is identified by +// the `kind` field in the extension, which must be set to +// application/vnd.dev.sigstore.verificationmaterial;version=0.1 for Sigstore. +// When used as a DSSE extension, if the `public_key` field is used to indicate +// the key identifier, it MUST match the `keyid` field of the signature the +// extension is attached to. +type VerificationMaterial struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The key material for verification purposes. + // + // This allows key material to be conveyed in one of three forms: + // + // 1. 
An unspecified public key identifier, for retrieving a key + // from an out-of-band mechanism (such as a keyring); + // + // 2. A sequence of one or more X.509 certificates, of which the first member + // MUST be a leaf certificate conveying the signing key. Subsequent members + // SHOULD be in issuing order, meaning that `n + 1` should be an issuer for `n`. + // + // Signers MUST NOT include root CA certificates in bundles, and SHOULD NOT + // include intermediate CA certificates that appear in an independent root of trust + // (such as the Public Good Instance's trusted root). + // + // Verifiers MUST validate the chain carefully to ensure that it chains up + // to a CA certificate that they independently trust. Verifiers SHOULD + // handle old or non-complying bundles that have superfluous intermediate and/or + // root CA certificates by either ignoring them or explicitly considering them + // untrusted for the purposes of chain building. + // + // 3. A single X.509 certificate, which MUST be a leaf certificate conveying + // the signing key. + // + // When used with the Public Good Instance (PGI) of Sigstore for "keyless" signing + // via Fulcio, form (1) MUST NOT be used, regardless of bundle version. Form (1) + // MAY be used with the PGI for self-managed keys. + // + // When used in a `0.1` or `0.2` bundle with the PGI and "keyless" signing, + // form (2) MUST be used. + // + // When used in a `0.3` bundle with the PGI and "keyless" signing, + // form (3) MUST be used. + // + // Types that are valid to be assigned to Content: + // + // *VerificationMaterial_PublicKey + // *VerificationMaterial_X509CertificateChain + // *VerificationMaterial_Certificate + Content isVerificationMaterial_Content `protobuf_oneof:"content"` + // An inclusion proof and an optional signed timestamp from the log. + // Client verification libraries MAY provide an option to support v0.1 + // bundles for backwards compatibility, which may contain an inclusion + // promise and not an inclusion proof. In this case, the client MUST + // validate the promise. + // Verifiers SHOULD NOT allow v0.1 bundles if they're used in an + // ecosystem which never produced them. + TlogEntries []*v11.TransparencyLogEntry `protobuf:"bytes,3,rep,name=tlog_entries,json=tlogEntries,proto3" json:"tlog_entries,omitempty"` + // Timestamp may also come from + // tlog_entries.inclusion_promise.signed_entry_timestamp. + TimestampVerificationData *TimestampVerificationData `protobuf:"bytes,4,opt,name=timestamp_verification_data,json=timestampVerificationData,proto3" json:"timestamp_verification_data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VerificationMaterial) Reset() { + *x = VerificationMaterial{} + mi := &file_sigstore_bundle_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VerificationMaterial) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerificationMaterial) ProtoMessage() {} + +func (x *VerificationMaterial) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_bundle_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerificationMaterial.ProtoReflect.Descriptor instead. 
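For orientation, a sketch of populating the form (3) leaf-certificate branch of the oneof described above. The import aliases and the assumption that common/v1's X509Certificate carries the DER bytes in a RawBytes field are mine, not part of this diff:

```go
package example

import (
	bundlev1 "github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1"
	commonv1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
)

// leafOnlyMaterial wraps a single DER-encoded leaf certificate in the
// v0.3-style "certificate" branch of the content oneof.
func leafOnlyMaterial(certDER []byte) *bundlev1.VerificationMaterial {
	return &bundlev1.VerificationMaterial{
		Content: &bundlev1.VerificationMaterial_Certificate{
			Certificate: &commonv1.X509Certificate{RawBytes: certDER}, // assumed field name
		},
	}
}
```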
+func (*VerificationMaterial) Descriptor() ([]byte, []int) { + return file_sigstore_bundle_proto_rawDescGZIP(), []int{1} +} + +func (x *VerificationMaterial) GetContent() isVerificationMaterial_Content { + if x != nil { + return x.Content + } + return nil +} + +func (x *VerificationMaterial) GetPublicKey() *v1.PublicKeyIdentifier { + if x != nil { + if x, ok := x.Content.(*VerificationMaterial_PublicKey); ok { + return x.PublicKey + } + } + return nil +} + +func (x *VerificationMaterial) GetX509CertificateChain() *v1.X509CertificateChain { + if x != nil { + if x, ok := x.Content.(*VerificationMaterial_X509CertificateChain); ok { + return x.X509CertificateChain + } + } + return nil +} + +func (x *VerificationMaterial) GetCertificate() *v1.X509Certificate { + if x != nil { + if x, ok := x.Content.(*VerificationMaterial_Certificate); ok { + return x.Certificate + } + } + return nil +} + +func (x *VerificationMaterial) GetTlogEntries() []*v11.TransparencyLogEntry { + if x != nil { + return x.TlogEntries + } + return nil +} + +func (x *VerificationMaterial) GetTimestampVerificationData() *TimestampVerificationData { + if x != nil { + return x.TimestampVerificationData + } + return nil +} + +type isVerificationMaterial_Content interface { + isVerificationMaterial_Content() +} + +type VerificationMaterial_PublicKey struct { + PublicKey *v1.PublicKeyIdentifier `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3,oneof"` +} + +type VerificationMaterial_X509CertificateChain struct { + X509CertificateChain *v1.X509CertificateChain `protobuf:"bytes,2,opt,name=x509_certificate_chain,json=x509CertificateChain,proto3,oneof"` +} + +type VerificationMaterial_Certificate struct { + Certificate *v1.X509Certificate `protobuf:"bytes,5,opt,name=certificate,proto3,oneof"` +} + +func (*VerificationMaterial_PublicKey) isVerificationMaterial_Content() {} + +func (*VerificationMaterial_X509CertificateChain) isVerificationMaterial_Content() {} + +func (*VerificationMaterial_Certificate) isVerificationMaterial_Content() {} + +type Bundle struct { + state protoimpl.MessageState `protogen:"open.v1"` + // MUST be application/vnd.dev.sigstore.bundle.v0.3+json when + // when encoded as JSON. + // Clients must to be able to accept media type using the previously + // defined formats: + // * application/vnd.dev.sigstore.bundle+json;version=0.1 + // * application/vnd.dev.sigstore.bundle+json;version=0.2 + // * application/vnd.dev.sigstore.bundle+json;version=0.3 + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // When a signer is identified by a X.509 certificate, a verifier MUST + // verify that the signature was computed at the time the certificate + // was valid as described in the Sigstore client spec: "Verification + // using a Bundle". + // + // If the verification material contains a public key identifier + // (key hint) and the `content` is a DSSE envelope, the key hints + // MUST be exactly the same in the verification material and in the + // DSSE envelope. 
+ VerificationMaterial *VerificationMaterial `protobuf:"bytes,2,opt,name=verification_material,json=verificationMaterial,proto3" json:"verification_material,omitempty"` + // Types that are valid to be assigned to Content: + // + // *Bundle_MessageSignature + // *Bundle_DsseEnvelope + Content isBundle_Content `protobuf_oneof:"content"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Bundle) Reset() { + *x = Bundle{} + mi := &file_sigstore_bundle_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Bundle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bundle) ProtoMessage() {} + +func (x *Bundle) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_bundle_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bundle.ProtoReflect.Descriptor instead. +func (*Bundle) Descriptor() ([]byte, []int) { + return file_sigstore_bundle_proto_rawDescGZIP(), []int{2} +} + +func (x *Bundle) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *Bundle) GetVerificationMaterial() *VerificationMaterial { + if x != nil { + return x.VerificationMaterial + } + return nil +} + +func (x *Bundle) GetContent() isBundle_Content { + if x != nil { + return x.Content + } + return nil +} + +func (x *Bundle) GetMessageSignature() *v1.MessageSignature { + if x != nil { + if x, ok := x.Content.(*Bundle_MessageSignature); ok { + return x.MessageSignature + } + } + return nil +} + +func (x *Bundle) GetDsseEnvelope() *dsse.Envelope { + if x != nil { + if x, ok := x.Content.(*Bundle_DsseEnvelope); ok { + return x.DsseEnvelope + } + } + return nil +} + +type isBundle_Content interface { + isBundle_Content() +} + +type Bundle_MessageSignature struct { + MessageSignature *v1.MessageSignature `protobuf:"bytes,3,opt,name=message_signature,json=messageSignature,proto3,oneof"` +} + +type Bundle_DsseEnvelope struct { + // A DSSE envelope can contain arbitrary payloads. + // Verifiers must verify that the payload type is a + // supported and expected type. This is part of the DSSE + // protocol which is defined here: + // + // DSSE envelopes in a bundle MUST have exactly one signature. + // This is a limitation from the DSSE spec, as it can contain + // multiple signatures. There are two primary reasons: + // 1. It simplifies the verification logic and policy + // 2. The bundle (currently) can only contain a single + // instance of the required verification materials + // + // During verification a client MUST reject an envelope if + // the number of signatures is not equal to one. 
+ DsseEnvelope *dsse.Envelope `protobuf:"bytes,4,opt,name=dsse_envelope,json=dsseEnvelope,proto3,oneof"` +} + +func (*Bundle_MessageSignature) isBundle_Content() {} + +func (*Bundle_DsseEnvelope) isBundle_Content() {} + +var File_sigstore_bundle_proto protoreflect.FileDescriptor + +var file_sigstore_bundle_proto_rawDesc = string([]byte{ + 0x0a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x0e, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7a, 0x0a, + 0x19, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x5d, 0x0a, 0x12, 0x72, 0x66, + 0x63, 0x33, 0x31, 0x36, 0x31, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x46, 0x43, 0x33, 0x31, 0x36, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x11, 0x72, 0x66, 0x63, 0x33, 0x31, 0x36, 0x31, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x14, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x12, 0x51, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x69, 0x0a, 0x16, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58, + 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x14, 0x78, 0x35, 0x30, 0x39, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x12, 0x50, 0x0a, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 
0x31, 0x2e, 0x58, + 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x74, 0x6c, 0x6f, 0x67, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x74, 0x6c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x71, 0x0a, 0x1b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x52, 0x19, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x22, 0xbf, 0x02, 0x0a, 0x06, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x14, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x12, 0x5c, 0x0a, 0x11, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x10, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x12, 0x3f, 0x0a, 0x0d, 0x64, 0x73, 0x73, 0x65, 0x5f, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, + 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x48, 0x00, 0x52, 0x0c, 0x64, 0x73, 0x73, 0x65, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, + 0x65, 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x33, 0x42, 0x7c, 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x42, 0x0b, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, + 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, + 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14, 0x53, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_sigstore_bundle_proto_rawDescOnce sync.Once + file_sigstore_bundle_proto_rawDescData []byte +) + +func file_sigstore_bundle_proto_rawDescGZIP() []byte { + file_sigstore_bundle_proto_rawDescOnce.Do(func() { + file_sigstore_bundle_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sigstore_bundle_proto_rawDesc), len(file_sigstore_bundle_proto_rawDesc))) + }) + return file_sigstore_bundle_proto_rawDescData +} + +var file_sigstore_bundle_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_sigstore_bundle_proto_goTypes = []any{ + (*TimestampVerificationData)(nil), // 0: dev.sigstore.bundle.v1.TimestampVerificationData + (*VerificationMaterial)(nil), // 1: dev.sigstore.bundle.v1.VerificationMaterial + (*Bundle)(nil), // 2: dev.sigstore.bundle.v1.Bundle + (*v1.RFC3161SignedTimestamp)(nil), // 3: dev.sigstore.common.v1.RFC3161SignedTimestamp + (*v1.PublicKeyIdentifier)(nil), // 4: dev.sigstore.common.v1.PublicKeyIdentifier + (*v1.X509CertificateChain)(nil), // 5: dev.sigstore.common.v1.X509CertificateChain + (*v1.X509Certificate)(nil), // 6: dev.sigstore.common.v1.X509Certificate + (*v11.TransparencyLogEntry)(nil), // 7: dev.sigstore.rekor.v1.TransparencyLogEntry + (*v1.MessageSignature)(nil), // 8: dev.sigstore.common.v1.MessageSignature + (*dsse.Envelope)(nil), // 9: io.intoto.Envelope +} +var file_sigstore_bundle_proto_depIdxs = []int32{ + 3, // 0: dev.sigstore.bundle.v1.TimestampVerificationData.rfc3161_timestamps:type_name -> dev.sigstore.common.v1.RFC3161SignedTimestamp + 4, // 1: dev.sigstore.bundle.v1.VerificationMaterial.public_key:type_name -> dev.sigstore.common.v1.PublicKeyIdentifier + 5, // 2: dev.sigstore.bundle.v1.VerificationMaterial.x509_certificate_chain:type_name -> dev.sigstore.common.v1.X509CertificateChain + 6, // 3: dev.sigstore.bundle.v1.VerificationMaterial.certificate:type_name -> dev.sigstore.common.v1.X509Certificate + 7, // 4: dev.sigstore.bundle.v1.VerificationMaterial.tlog_entries:type_name -> dev.sigstore.rekor.v1.TransparencyLogEntry + 0, // 5: dev.sigstore.bundle.v1.VerificationMaterial.timestamp_verification_data:type_name -> dev.sigstore.bundle.v1.TimestampVerificationData + 1, // 6: dev.sigstore.bundle.v1.Bundle.verification_material:type_name -> dev.sigstore.bundle.v1.VerificationMaterial + 8, // 7: dev.sigstore.bundle.v1.Bundle.message_signature:type_name -> dev.sigstore.common.v1.MessageSignature + 9, // 8: dev.sigstore.bundle.v1.Bundle.dsse_envelope:type_name -> io.intoto.Envelope + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { 
file_sigstore_bundle_proto_init() } +func file_sigstore_bundle_proto_init() { + if File_sigstore_bundle_proto != nil { + return + } + file_sigstore_bundle_proto_msgTypes[1].OneofWrappers = []any{ + (*VerificationMaterial_PublicKey)(nil), + (*VerificationMaterial_X509CertificateChain)(nil), + (*VerificationMaterial_Certificate)(nil), + } + file_sigstore_bundle_proto_msgTypes[2].OneofWrappers = []any{ + (*Bundle_MessageSignature)(nil), + (*Bundle_DsseEnvelope)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_sigstore_bundle_proto_rawDesc), len(file_sigstore_bundle_proto_rawDesc)), + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sigstore_bundle_proto_goTypes, + DependencyIndexes: file_sigstore_bundle_proto_depIdxs, + MessageInfos: file_sigstore_bundle_proto_msgTypes, + }.Build() + File_sigstore_bundle_proto = out.File + file_sigstore_bundle_proto_goTypes = nil + file_sigstore_bundle_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go new file mode 100644 index 00000000000..298e2439ef1 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go @@ -0,0 +1,241 @@ +// https://raw.githubusercontent.com/secure-systems-lab/dsse/9c813476bd36de70a5738c72e784f123ecea16af/envelope.proto + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: envelope.proto + +package dsse + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// An authenticated message of arbitrary type. +type Envelope struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Message to be signed. (In JSON, this is encoded as base64.) + // REQUIRED. + Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` + // String unambiguously identifying how to interpret payload. + // REQUIRED. 
+ PayloadType string `protobuf:"bytes,2,opt,name=payloadType,proto3" json:"payloadType,omitempty"` + // Signature over: + // + // PAE(type, payload) + // + // Where PAE is defined as: + // PAE(type, payload) = "DSSEv1" + SP + LEN(type) + SP + type + SP + LEN(payload) + SP + payload + // + = concatenation + // SP = ASCII space [0x20] + // "DSSEv1" = ASCII [0x44, 0x53, 0x53, 0x45, 0x76, 0x31] + // LEN(s) = ASCII decimal encoding of the byte length of s, with no leading zeros + // REQUIRED (length >= 1). + Signatures []*Signature `protobuf:"bytes,3,rep,name=signatures,proto3" json:"signatures,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Envelope) Reset() { + *x = Envelope{} + mi := &file_envelope_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Envelope) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Envelope) ProtoMessage() {} + +func (x *Envelope) ProtoReflect() protoreflect.Message { + mi := &file_envelope_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Envelope.ProtoReflect.Descriptor instead. +func (*Envelope) Descriptor() ([]byte, []int) { + return file_envelope_proto_rawDescGZIP(), []int{0} +} + +func (x *Envelope) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *Envelope) GetPayloadType() string { + if x != nil { + return x.PayloadType + } + return "" +} + +func (x *Envelope) GetSignatures() []*Signature { + if x != nil { + return x.Signatures + } + return nil +} + +type Signature struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Signature itself. (In JSON, this is encoded as base64.) + // REQUIRED. + Sig []byte `protobuf:"bytes,1,opt,name=sig,proto3" json:"sig,omitempty"` + // *Unauthenticated* hint identifying which public key was used. + // OPTIONAL. + Keyid string `protobuf:"bytes,2,opt,name=keyid,proto3" json:"keyid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Signature) Reset() { + *x = Signature{} + mi := &file_envelope_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Signature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Signature) ProtoMessage() {} + +func (x *Signature) ProtoReflect() protoreflect.Message { + mi := &file_envelope_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Signature.ProtoReflect.Descriptor instead. 
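+
+// Editorial sketch, not generated code: paeEncode shows how the DSSE
+// Pre-Authentication Encoding described on Envelope.Signatures above could
+// be computed. The helper names paeDecimal and paeEncode are illustrative
+// assumptions, not part of this package's API; real signers and verifiers
+// should rely on a maintained DSSE implementation.
+func paeDecimal(n int) []byte {
+	// ASCII decimal encoding of n, with no leading zeros.
+	if n == 0 {
+		return []byte{'0'}
+	}
+	var buf [20]byte
+	i := len(buf)
+	for n > 0 {
+		i--
+		buf[i] = byte('0' + n%10)
+		n /= 10
+	}
+	return buf[i:]
+}
+
+// paeEncode returns PAE(payloadType, payload) =
+// "DSSEv1" + SP + LEN(type) + SP + type + SP + LEN(payload) + SP + payload.
+// The signature is computed over this encoding rather than the raw payload,
+// and per the Bundle rules a verifier rejects envelopes whose Signatures
+// field does not contain exactly one entry.
+func paeEncode(payloadType string, payload []byte) []byte {
+	out := []byte("DSSEv1 ")
+	out = append(out, paeDecimal(len(payloadType))...)
+	out = append(out, ' ')
+	out = append(out, payloadType...)
+	out = append(out, ' ')
+	out = append(out, paeDecimal(len(payload))...)
+	out = append(out, ' ')
+	out = append(out, payload...)
+	return out
+}
+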
+func (*Signature) Descriptor() ([]byte, []int) { + return file_envelope_proto_rawDescGZIP(), []int{1} +} + +func (x *Signature) GetSig() []byte { + if x != nil { + return x.Sig + } + return nil +} + +func (x *Signature) GetKeyid() string { + if x != nil { + return x.Keyid + } + return "" +} + +var File_envelope_proto protoreflect.FileDescriptor + +var file_envelope_proto_rawDesc = string([]byte{ + 0x0a, 0x0e, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x09, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x74, 0x6f, 0x22, 0x7c, 0x0a, 0x08, 0x45, + 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x0a, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x33, 0x0a, 0x09, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x69, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x69, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x6b, 0x65, 0x79, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6b, 0x65, 0x79, 0x69, 0x64, 0x42, 0x44, + 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, + 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x64, + 0x73, 0x73, 0x65, 0xea, 0x02, 0x0e, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, + 0x44, 0x53, 0x53, 0x45, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_envelope_proto_rawDescOnce sync.Once + file_envelope_proto_rawDescData []byte +) + +func file_envelope_proto_rawDescGZIP() []byte { + file_envelope_proto_rawDescOnce.Do(func() { + file_envelope_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_envelope_proto_rawDesc), len(file_envelope_proto_rawDesc))) + }) + return file_envelope_proto_rawDescData +} + +var file_envelope_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envelope_proto_goTypes = []any{ + (*Envelope)(nil), // 0: io.intoto.Envelope + (*Signature)(nil), // 1: io.intoto.Signature +} +var file_envelope_proto_depIdxs = []int32{ + 1, // 0: io.intoto.Envelope.signatures:type_name -> io.intoto.Signature + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envelope_proto_init() } +func file_envelope_proto_init() { + if File_envelope_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_envelope_proto_rawDesc), len(file_envelope_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 
0, + NumServices: 0, + }, + GoTypes: file_envelope_proto_goTypes, + DependencyIndexes: file_envelope_proto_depIdxs, + MessageInfos: file_envelope_proto_msgTypes, + }.Build() + File_envelope_proto = out.File + file_envelope_proto_goTypes = nil + file_envelope_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go new file mode 100644 index 00000000000..43b3111ebf8 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go @@ -0,0 +1,560 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: sigstore_rekor.proto + +package v1 + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// KindVersion contains the entry's kind and api version. +type KindVersion struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Kind is the type of entry being stored in the log. + // See here for a list: https://github.com/sigstore/rekor/tree/main/pkg/types + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // The specific api version of the type. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *KindVersion) Reset() { + *x = KindVersion{} + mi := &file_sigstore_rekor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *KindVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KindVersion) ProtoMessage() {} + +func (x *KindVersion) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_rekor_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KindVersion.ProtoReflect.Descriptor instead. 
+func (*KindVersion) Descriptor() ([]byte, []int) {
+	return file_sigstore_rekor_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *KindVersion) GetKind() string {
+	if x != nil {
+		return x.Kind
+	}
+	return ""
+}
+
+func (x *KindVersion) GetVersion() string {
+	if x != nil {
+		return x.Version
+	}
+	return ""
+}
+
+// The checkpoint MUST contain an origin string as a unique log identifier,
+// the tree size, and the root hash. It MAY also be followed by optional data,
+// and clients MUST NOT assume that the optional data is present. The
+// checkpoint MUST also contain a signature over the root hash (tree head).
+// The checkpoint MAY contain additional signatures, but the first SHOULD be
+// the signature from the log. Checkpoint contents are concatenated with
+// newlines into a single string.
+// The checkpoint format is described in
+// https://github.com/transparency-dev/formats/blob/main/log/README.md
+// and https://github.com/C2SP/C2SP/blob/main/tlog-checkpoint.md.
+// An example implementation can be found in https://github.com/sigstore/rekor/blob/main/pkg/util/signed_note.go
+type Checkpoint struct {
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	Envelope      string                 `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Checkpoint) Reset() {
+	*x = Checkpoint{}
+	mi := &file_sigstore_rekor_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Checkpoint) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Checkpoint) ProtoMessage() {}
+
+func (x *Checkpoint) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_rekor_proto_msgTypes[1]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Checkpoint.ProtoReflect.Descriptor instead.
+func (*Checkpoint) Descriptor() ([]byte, []int) {
+	return file_sigstore_rekor_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Checkpoint) GetEnvelope() string {
+	if x != nil {
+		return x.Envelope
+	}
+	return ""
+}
+
+// InclusionProof is the proof returned from the transparency log. It can
+// be used for offline or online verification against the log.
+type InclusionProof struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// The index of the entry in the tree it was written to.
+	LogIndex int64 `protobuf:"varint,1,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"`
+	// The hash digest stored at the root of the merkle tree at the time
+	// the proof was generated.
+	RootHash []byte `protobuf:"bytes,2,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"`
+	// The size of the merkle tree at the time the proof was generated.
+	TreeSize int64 `protobuf:"varint,3,opt,name=tree_size,json=treeSize,proto3" json:"tree_size,omitempty"`
+	// A list of hashes required to compute the inclusion proof, sorted
+	// in order from leaf to root.
+	// Note that leaf and root hashes are not included.
+	// The root hash is available separately in this message, and the
+	// leaf hash should be calculated by the client.
+	Hashes [][]byte `protobuf:"bytes,4,rep,name=hashes,proto3" json:"hashes,omitempty"`
+	// Signature of the tree head, as of the time this proof was
+	// generated. See above info on 'Checkpoint' for more details.
+	Checkpoint    *Checkpoint `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *InclusionProof) Reset() {
+	*x = InclusionProof{}
+	mi := &file_sigstore_rekor_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *InclusionProof) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InclusionProof) ProtoMessage() {}
+
+func (x *InclusionProof) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_rekor_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use InclusionProof.ProtoReflect.Descriptor instead.
+func (*InclusionProof) Descriptor() ([]byte, []int) {
+	return file_sigstore_rekor_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *InclusionProof) GetLogIndex() int64 {
+	if x != nil {
+		return x.LogIndex
+	}
+	return 0
+}
+
+func (x *InclusionProof) GetRootHash() []byte {
+	if x != nil {
+		return x.RootHash
+	}
+	return nil
+}
+
+func (x *InclusionProof) GetTreeSize() int64 {
+	if x != nil {
+		return x.TreeSize
+	}
+	return 0
+}
+
+func (x *InclusionProof) GetHashes() [][]byte {
+	if x != nil {
+		return x.Hashes
+	}
+	return nil
+}
+
+func (x *InclusionProof) GetCheckpoint() *Checkpoint {
+	if x != nil {
+		return x.Checkpoint
+	}
+	return nil
+}
+
+// The inclusion promise is calculated by Rekor as a signature over a
+// canonical JSON serialization of the persisted entry, the log ID, the log
+// index and the integration timestamp.
+// See https://github.com/sigstore/rekor/blob/a6e58f72b6b18cc06cefe61808efd562b9726330/pkg/api/entries.go#L54
+// The format of the signature depends on the transparency log's public key.
+// If the signature algorithm requires a hash function and/or a signature
+// scheme (e.g. RSA) those have to be retrieved out-of-band from the log's
+// operators, together with the public key.
+// This is used to verify the integration timestamp's value and that the log
+// has promised to include the entry.
+type InclusionPromise struct {
+	state                protoimpl.MessageState `protogen:"open.v1"`
+	SignedEntryTimestamp []byte                 `protobuf:"bytes,1,opt,name=signed_entry_timestamp,json=signedEntryTimestamp,proto3" json:"signed_entry_timestamp,omitempty"`
+	unknownFields        protoimpl.UnknownFields
+	sizeCache            protoimpl.SizeCache
+}
+
+func (x *InclusionPromise) Reset() {
+	*x = InclusionPromise{}
+	mi := &file_sigstore_rekor_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *InclusionPromise) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InclusionPromise) ProtoMessage() {}
+
+func (x *InclusionPromise) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_rekor_proto_msgTypes[3]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
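+// Editorial sketch, not generated code: rootFromInclusionProof shows one way
+// to recompute a Merkle root from an InclusionProof, following the
+// inclusion-proof verification algorithm of RFC 9162, Section 2.1.3.2. The
+// function name and the hash parameter (pass e.g. sha256.Sum256) are
+// illustrative assumptions; production clients should prefer a vetted
+// verifier such as github.com/transparency-dev/merkle. The leaf hash is
+// calculated by the client; for RFC 6962 style logs it is
+// hash(0x00 || leaf entry).
+func rootFromInclusionProof(hash func([]byte) [32]byte, leafHash []byte, logIndex, treeSize int64, hashes [][]byte) ([]byte, bool) {
+	if logIndex < 0 || treeSize <= logIndex {
+		return nil, false
+	}
+	fn, sn := logIndex, treeSize-1
+	r := leafHash
+	for _, p := range hashes {
+		if sn == 0 {
+			return nil, false // the proof is longer than the path to the root
+		}
+		if fn%2 == 1 || fn == sn {
+			// Interior node: hash(0x01 || left || right), with r on the right.
+			h := hash(append(append([]byte{0x01}, p...), r...))
+			r = h[:]
+			if fn%2 == 0 {
+				for fn%2 == 0 && fn != 0 {
+					fn >>= 1
+					sn >>= 1
+				}
+			}
+		} else {
+			// Interior node with r on the left.
+			h := hash(append(append([]byte{0x01}, r...), p...))
+			r = h[:]
+		}
+		fn >>= 1
+		sn >>= 1
+	}
+	// The caller compares the result against InclusionProof.RootHash; the
+	// proof is structurally valid only if sn reached 0.
+	return r, sn == 0
+}
+
+// Deprecated: Use InclusionPromise.ProtoReflect.Descriptor instead.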
+func (*InclusionPromise) Descriptor() ([]byte, []int) {
+	return file_sigstore_rekor_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *InclusionPromise) GetSignedEntryTimestamp() []byte {
+	if x != nil {
+		return x.SignedEntryTimestamp
+	}
+	return nil
+}
+
+// TransparencyLogEntry captures all the details required from Rekor to
+// reconstruct an entry, given that the payload is provided via other means.
+// This type can easily be created from the existing response from Rekor.
+// Future iterations could rely on Rekor returning the minimal set of
+// attributes (excluding the payload) that are required for verifying the
+// inclusion promise. The inclusion promise (called SignedEntryTimestamp in
+// the response from Rekor) is similar to a Signed Certificate Timestamp
+// as described here https://www.rfc-editor.org/rfc/rfc6962.html#section-3.2.
+type TransparencyLogEntry struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// The global index of the entry, used when querying the log by index.
+	LogIndex int64 `protobuf:"varint,1,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"`
+	// The unique identifier of the log.
+	LogId *v1.LogId `protobuf:"bytes,2,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
+	// The kind (type) and version of the object associated with this
+	// entry. These values are required to construct the entry during
+	// verification.
+	KindVersion *KindVersion `protobuf:"bytes,3,opt,name=kind_version,json=kindVersion,proto3" json:"kind_version,omitempty"`
+	// The UNIX timestamp from the log when the entry was persisted.
+	// The integration time MUST NOT be trusted if inclusion_promise
+	// is omitted.
+	IntegratedTime int64 `protobuf:"varint,4,opt,name=integrated_time,json=integratedTime,proto3" json:"integrated_time,omitempty"`
+	// The inclusion promise/signed entry timestamp from the log.
+	// Required for v0.1 bundles, and MUST be verified.
+	// Optional for >= v0.2 bundles if another suitable source of
+	// time is present (such as another source of signed time,
+	// or the current system time for long-lived certificates).
+	// MUST be verified if no other suitable source of time is present,
+	// and SHOULD be verified otherwise.
+	InclusionPromise *InclusionPromise `protobuf:"bytes,5,opt,name=inclusion_promise,json=inclusionPromise,proto3" json:"inclusion_promise,omitempty"`
+	// The inclusion proof can be used for offline or online verification
+	// that the entry was appended to the log, and that the log has not been
+	// altered.
+	InclusionProof *InclusionProof `protobuf:"bytes,6,opt,name=inclusion_proof,json=inclusionProof,proto3" json:"inclusion_proof,omitempty"`
+	// Optional. The canonicalized transparency log entry, used to
+	// reconstruct the Signed Entry Timestamp (SET) during verification.
+	// The contents of this field are the same as the `body` field in
+	// a Rekor response, meaning that it does **not** include the "full"
+	// canonicalized form (of log index, ID, etc.), which are
+	// exposed as separate fields. The verifier is responsible for
+	// combining the `canonicalized_body`, `log_index`, `log_id`,
+	// and `integrated_time` into the payload that the SET's signature
+	// is generated over.
+	// This field is intended to be used in cases where the SET cannot be
+	// produced deterministically (e.g. inconsistent JSON field ordering,
+	// differing whitespace, etc).
+ // + // If set, clients MUST verify that the signature referenced in the + // `canonicalized_body` matches the signature provided in the + // `Bundle.content`. + // If not set, clients are responsible for constructing an equivalent + // payload from other sources to verify the signature. + CanonicalizedBody []byte `protobuf:"bytes,7,opt,name=canonicalized_body,json=canonicalizedBody,proto3" json:"canonicalized_body,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TransparencyLogEntry) Reset() { + *x = TransparencyLogEntry{} + mi := &file_sigstore_rekor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TransparencyLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransparencyLogEntry) ProtoMessage() {} + +func (x *TransparencyLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_rekor_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransparencyLogEntry.ProtoReflect.Descriptor instead. +func (*TransparencyLogEntry) Descriptor() ([]byte, []int) { + return file_sigstore_rekor_proto_rawDescGZIP(), []int{4} +} + +func (x *TransparencyLogEntry) GetLogIndex() int64 { + if x != nil { + return x.LogIndex + } + return 0 +} + +func (x *TransparencyLogEntry) GetLogId() *v1.LogId { + if x != nil { + return x.LogId + } + return nil +} + +func (x *TransparencyLogEntry) GetKindVersion() *KindVersion { + if x != nil { + return x.KindVersion + } + return nil +} + +func (x *TransparencyLogEntry) GetIntegratedTime() int64 { + if x != nil { + return x.IntegratedTime + } + return 0 +} + +func (x *TransparencyLogEntry) GetInclusionPromise() *InclusionPromise { + if x != nil { + return x.InclusionPromise + } + return nil +} + +func (x *TransparencyLogEntry) GetInclusionProof() *InclusionProof { + if x != nil { + return x.InclusionProof + } + return nil +} + +func (x *TransparencyLogEntry) GetCanonicalizedBody() []byte { + if x != nil { + return x.CanonicalizedBody + } + return nil +} + +var File_sigstore_rekor_proto protoreflect.FileDescriptor + +var file_sigstore_rekor_proto_rawDesc = string([]byte{ + 0x0a, 0x14, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x45, 0x0a, 0x0b, 0x4b, 0x69, 0x6e, 0x64, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1d, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x2d, 0x0a, 0x0a, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x08, 0x65, 0x6e, + 0x76, 0x65, 0x6c, 0x6f, 
0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x0e, + 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x20, + 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x12, 0x20, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x12, 0x20, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x74, 0x72, 0x65, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, + 0x73, 0x12, 0x46, 0x0a, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x10, 0x49, 0x6e, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x12, 0x39, 0x0a, + 0x16, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x14, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xc7, 0x03, 0x0a, 0x14, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x20, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x12, 0x39, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x67, + 0x49, 0x64, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x4a, + 0x0a, 0x0c, 0x6b, 0x69, 0x6e, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x69, 0x6e, + 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x6b, + 0x69, 0x6e, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x0f, 0x69, 0x6e, + 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x11, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 
0x6f, 0x6d, 0x69, 0x73, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x63, 0x6c, + 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x52, 0x10, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x12, 0x53, + 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f, + 0x66, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x6f, 0x66, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x11, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x42, 0x6f, + 0x64, 0x79, 0x42, 0x78, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, + 0x31, 0x42, 0x0a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, + 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x72, 0x65, + 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x13, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_sigstore_rekor_proto_rawDescOnce sync.Once + file_sigstore_rekor_proto_rawDescData []byte +) + +func file_sigstore_rekor_proto_rawDescGZIP() []byte { + file_sigstore_rekor_proto_rawDescOnce.Do(func() { + file_sigstore_rekor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sigstore_rekor_proto_rawDesc), len(file_sigstore_rekor_proto_rawDesc))) + }) + return file_sigstore_rekor_proto_rawDescData +} + +var file_sigstore_rekor_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_sigstore_rekor_proto_goTypes = []any{ + (*KindVersion)(nil), // 0: dev.sigstore.rekor.v1.KindVersion + (*Checkpoint)(nil), // 1: dev.sigstore.rekor.v1.Checkpoint + (*InclusionProof)(nil), // 2: dev.sigstore.rekor.v1.InclusionProof + (*InclusionPromise)(nil), // 3: dev.sigstore.rekor.v1.InclusionPromise + (*TransparencyLogEntry)(nil), // 4: dev.sigstore.rekor.v1.TransparencyLogEntry + (*v1.LogId)(nil), // 5: dev.sigstore.common.v1.LogId +} +var file_sigstore_rekor_proto_depIdxs = []int32{ + 1, // 0: dev.sigstore.rekor.v1.InclusionProof.checkpoint:type_name -> dev.sigstore.rekor.v1.Checkpoint + 5, // 1: dev.sigstore.rekor.v1.TransparencyLogEntry.log_id:type_name -> dev.sigstore.common.v1.LogId + 0, // 2: dev.sigstore.rekor.v1.TransparencyLogEntry.kind_version:type_name -> dev.sigstore.rekor.v1.KindVersion + 3, // 3: dev.sigstore.rekor.v1.TransparencyLogEntry.inclusion_promise:type_name -> dev.sigstore.rekor.v1.InclusionPromise + 2, // 4: 
dev.sigstore.rekor.v1.TransparencyLogEntry.inclusion_proof:type_name -> dev.sigstore.rekor.v1.InclusionProof + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_sigstore_rekor_proto_init() } +func file_sigstore_rekor_proto_init() { + if File_sigstore_rekor_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_sigstore_rekor_proto_rawDesc), len(file_sigstore_rekor_proto_rawDesc)), + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sigstore_rekor_proto_goTypes, + DependencyIndexes: file_sigstore_rekor_proto_depIdxs, + MessageInfos: file_sigstore_rekor_proto_msgTypes, + }.Build() + File_sigstore_rekor_proto = out.File + file_sigstore_rekor_proto_goTypes = nil + file_sigstore_rekor_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go new file mode 100644 index 00000000000..580d1c69f5a --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go @@ -0,0 +1,1089 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: sigstore_trustroot.proto + +package v1 + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// ServiceSelector specifies how a client SHOULD select a set of +// Services to connect to. A client SHOULD throw an error if +// the value is SERVICE_SELECTOR_UNDEFINED. +type ServiceSelector int32 + +const ( + ServiceSelector_SERVICE_SELECTOR_UNDEFINED ServiceSelector = 0 + // Clients SHOULD select all Services based on supported API version + // and validity window. + ServiceSelector_ALL ServiceSelector = 1 + // Clients SHOULD select one Service based on supported API version + // and validity window. It is up to the client implementation to + // decide how to select the Service, e.g. random or round-robin. 
+	ServiceSelector_ANY ServiceSelector = 2
+	// Clients SHOULD select a specific number of Services based on
+	// supported API version and validity window, using the provided
+	// `count`. It is up to the client implementation to decide how to
+	// select the Service, e.g. random or round-robin.
+	ServiceSelector_EXACT ServiceSelector = 3
+)
+
+// Enum value maps for ServiceSelector.
+var (
+	ServiceSelector_name = map[int32]string{
+		0: "SERVICE_SELECTOR_UNDEFINED",
+		1: "ALL",
+		2: "ANY",
+		3: "EXACT",
+	}
+	ServiceSelector_value = map[string]int32{
+		"SERVICE_SELECTOR_UNDEFINED": 0,
+		"ALL":                        1,
+		"ANY":                        2,
+		"EXACT":                      3,
+	}
+)
+
+func (x ServiceSelector) Enum() *ServiceSelector {
+	p := new(ServiceSelector)
+	*p = x
+	return p
+}
+
+func (x ServiceSelector) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ServiceSelector) Descriptor() protoreflect.EnumDescriptor {
+	return file_sigstore_trustroot_proto_enumTypes[0].Descriptor()
+}
+
+func (ServiceSelector) Type() protoreflect.EnumType {
+	return &file_sigstore_trustroot_proto_enumTypes[0]
+}
+
+func (x ServiceSelector) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ServiceSelector.Descriptor instead.
+func (ServiceSelector) EnumDescriptor() ([]byte, []int) {
+	return file_sigstore_trustroot_proto_rawDescGZIP(), []int{0}
+}
+
+// TransparencyLogInstance describes the immutable parameters from a
+// transparency log.
+// See https://www.rfc-editor.org/rfc/rfc9162.html#name-log-parameters
+// for more details.
+// The included parameters are the minimal set required to identify a log,
+// and verify an inclusion proof/promise.
+type TransparencyLogInstance struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// The base URL of the log, which clients can use to construct
+	// request URLs.
+	// SHOULD match the origin on the log checkpoint:
+	// https://github.com/C2SP/C2SP/blob/main/tlog-checkpoint.md#note-text.
+	BaseUrl string `protobuf:"bytes,1,opt,name=base_url,json=baseUrl,proto3" json:"base_url,omitempty"`
+	// The hash algorithm used for the Merkle Tree.
+	HashAlgorithm v1.HashAlgorithm `protobuf:"varint,2,opt,name=hash_algorithm,json=hashAlgorithm,proto3,enum=dev.sigstore.common.v1.HashAlgorithm" json:"hash_algorithm,omitempty"`
+	// The public key used to verify signatures generated by the log.
+	// This attribute contains the signature algorithm used by the log.
+	PublicKey *v1.PublicKey `protobuf:"bytes,3,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
+	// The identifier for this transparency log.
+	// Represented as the SHA-256 hash of the log's public key,
+	// calculated over the DER encoding of the key represented as
+	// SubjectPublicKeyInfo.
+	// See https://www.rfc-editor.org/rfc/rfc6962#section-3.2
+	// For Rekor v2 instances, log_id and checkpoint_key_id will be set
+	// to the same value.
+	// It is recommended to use checkpoint_key_id instead, since log_id is not
+	// guaranteed to be unique across multiple deployments. Clients
+	// must use the key name and key ID, as defined by the signed-note spec
+	// linked below, from a checkpoint to determine the correct
+	// TransparencyLogInstance to verify a proof.
+	// log_id will eventually be deprecated in favor of checkpoint_key_id.
+	LogId *v1.LogId `protobuf:"bytes,4,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
+	// The unique identifier for the log, used in the checkpoint.
+ // Only supported for TrustedRoot media types matching or greater than + // application/vnd.dev.sigstore.trustedroot.v0.2+json + // Its calculation is described in + // https://github.com/C2SP/C2SP/blob/main/signed-note.md#signatures + // SHOULD be set for all logs. When not set, clients MUST use log_id. + // + // For Ed25519 signatures, the key ID is computed per the C2SP spec: + // key ID = SHA-256(key name || 0x0A || 0x01 || 32-byte Ed25519 public key)[:4] + // For ECDSA signatures, the key ID is computed per the C2SP spec: + // key ID = SHA-256(PKIX ASN.1 DER-encoded public key, in SubjectPublicKeyInfo format)[:4] + // For RSA signatures, the signature type will be 0xff with an appended identifier for the format, + // "PKIX-RSA-PKCS#1v1.5": + // key ID = SHA-256(key name || 0x0A || 0xff || PKIX-RSA-PKCS#1v1.5 || PKIX ASN.1 DER-encoded public key)[:4] + // + // This is provided for convenience. Clients can also calculate the + // checkpoint key ID given the log's public key. + // SHOULD be 4 bytes long, as a truncated hash. + // + // To find a matching TransparencyLogInstance in the TrustedRoot, + // clients will parse the checkpoint, and for each signature line, + // use the key name (i.e. log origin, base_url from TrustedRoot) + // and checkpoint key ID (i.e. checkpoint_key_id from TrustedRoot) + // which can then be compared against the TrustedRoot log instances. + CheckpointKeyId *v1.LogId `protobuf:"bytes,5,opt,name=checkpoint_key_id,json=checkpointKeyId,proto3" json:"checkpoint_key_id,omitempty"` + // The name of the operator of this log deployment. Operator MUST be + // formatted as a scheme-less URI, e.g. sigstore.dev + // Only supported for TrustedRoot media types matching or greater than + // application/vnd.dev.sigstore.trustedroot.v0.2+json + // This MUST be used when there are multiple transparency log instances + // to determine if log proof verification meets a specified threshold, + // e.g. two proofs from log deployments operated by the same operator + // should count as only one valid proof. + Operator string `protobuf:"bytes,6,opt,name=operator,proto3" json:"operator,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TransparencyLogInstance) Reset() { + *x = TransparencyLogInstance{} + mi := &file_sigstore_trustroot_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TransparencyLogInstance) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransparencyLogInstance) ProtoMessage() {} + +func (x *TransparencyLogInstance) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransparencyLogInstance.ProtoReflect.Descriptor instead. 
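+
+// Editorial sketch, not generated code: ed25519CheckpointKeyID shows the
+// Ed25519 checkpoint key ID computation described above, per the C2SP
+// signed-note spec. The function name and the hash parameter (pass e.g.
+// sha256.Sum256) are illustrative assumptions, not part of this package's
+// API.
+func ed25519CheckpointKeyID(hash func([]byte) [32]byte, keyName string, publicKey []byte) []byte {
+	// key ID = SHA-256(key name || 0x0A || 0x01 || 32-byte Ed25519 public key)[:4]
+	msg := append([]byte(keyName), 0x0A, 0x01)
+	msg = append(msg, publicKey...)
+	sum := hash(msg)
+	// A client can compare these 4 bytes against checkpoint_key_id when
+	// matching a checkpoint signature line to a TransparencyLogInstance.
+	return sum[:4]
+}
+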
+func (*TransparencyLogInstance) Descriptor() ([]byte, []int) {
+	return file_sigstore_trustroot_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TransparencyLogInstance) GetBaseUrl() string {
+	if x != nil {
+		return x.BaseUrl
+	}
+	return ""
+}
+
+func (x *TransparencyLogInstance) GetHashAlgorithm() v1.HashAlgorithm {
+	if x != nil {
+		return x.HashAlgorithm
+	}
+	return v1.HashAlgorithm(0)
+}
+
+func (x *TransparencyLogInstance) GetPublicKey() *v1.PublicKey {
+	if x != nil {
+		return x.PublicKey
+	}
+	return nil
+}
+
+func (x *TransparencyLogInstance) GetLogId() *v1.LogId {
+	if x != nil {
+		return x.LogId
+	}
+	return nil
+}
+
+func (x *TransparencyLogInstance) GetCheckpointKeyId() *v1.LogId {
+	if x != nil {
+		return x.CheckpointKeyId
+	}
+	return nil
+}
+
+func (x *TransparencyLogInstance) GetOperator() string {
+	if x != nil {
+		return x.Operator
+	}
+	return ""
+}
+
+// CertificateAuthority lists the information required to identify which
+// CA to use and perform signature verification.
+type CertificateAuthority struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// The root certificate MUST be self-signed, and so the subject and
+	// issuer are the same.
+	Subject *v1.DistinguishedName `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"`
+	// The URI identifies the certificate authority.
+	//
+	// It is RECOMMENDED that the URI be the base URL for the certificate
+	// authority, which can be provided to any SDK/client supplied
+	// by the certificate authority to interact with the certificate
+	// authority.
+	Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"`
+	// The certificate chain for this CA. The last certificate in the chain
+	// MUST be the trust anchor. The trust anchor MAY be a self-signed root
+	// CA certificate or MAY be an intermediate CA certificate.
+	CertChain *v1.X509CertificateChain `protobuf:"bytes,3,opt,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"`
+	// The time the *entire* chain was valid. This is at most the
+	// longest interval when *all* certificates in the chain were valid,
+	// but it MAY be shorter. Clients MUST check timestamps against *both*
+	// the `valid_for` time range *and* the entire certificate chain.
+	//
+	// The TimeRange should be considered valid *inclusive* of the
+	// endpoints.
+	ValidFor *v1.TimeRange `protobuf:"bytes,4,opt,name=valid_for,json=validFor,proto3" json:"valid_for,omitempty"`
+	// The name of the operator of this certificate or timestamp authority.
+	// Operator MUST be formatted as a scheme-less URI, e.g. sigstore.dev
+	// This MUST be used when there are multiple timestamp authorities to
+	// determine if the signed timestamp verification meets a specified
+	// threshold, e.g. two signed timestamps from timestamp authorities
+	// operated by the same operator should count as only one valid
+	// timestamp.
+ Operator string `protobuf:"bytes,5,opt,name=operator,proto3" json:"operator,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CertificateAuthority) Reset() { + *x = CertificateAuthority{} + mi := &file_sigstore_trustroot_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CertificateAuthority) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateAuthority) ProtoMessage() {} + +func (x *CertificateAuthority) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateAuthority.ProtoReflect.Descriptor instead. +func (*CertificateAuthority) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{1} +} + +func (x *CertificateAuthority) GetSubject() *v1.DistinguishedName { + if x != nil { + return x.Subject + } + return nil +} + +func (x *CertificateAuthority) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (x *CertificateAuthority) GetCertChain() *v1.X509CertificateChain { + if x != nil { + return x.CertChain + } + return nil +} + +func (x *CertificateAuthority) GetValidFor() *v1.TimeRange { + if x != nil { + return x.ValidFor + } + return nil +} + +func (x *CertificateAuthority) GetOperator() string { + if x != nil { + return x.Operator + } + return "" +} + +// TrustedRoot describes the client's complete set of trusted entities. +// How the TrustedRoot is populated is not specified, but can be a +// combination of many sources such as TUF repositories, files on disk, etc. +// +// The TrustedRoot is not meant to be used for any artifact verification, only +// to capture the complete/global set of trusted verification materials. +// When verifying an artifact, based on the artifact and policies, a selection +// of keys/authorities is expected to be extracted and provided to the +// verification function. This way the set of keys/authorities can be kept to +// a minimal set by the policy, to gain better control over which signatures +// are allowed. +// +// The embedded transparency logs, CT logs, CAs and TSAs MUST include any +// previously used instance -- otherwise signatures made in the past cannot +// be verified. +// +// All the listed instances SHOULD be sorted by the 'valid_for' in ascending +// order, that is, the oldest instance first. Only the last instance is +// allowed to have its 'end' timestamp unset. All previous instances MUST +// have a closed interval of validity. The last instance MAY have a closed +// interval. Clients MUST accept instances that overlap in time; if not, +// clients may experience problems during rotations of verification +// materials. +// +// To be able to manage planned rotations of either transparency logs or +// certificate authorities, clients MUST accept lists of instances where +// the last instance has a 'valid_for' that extends into the future. +// This should not be a problem as clients SHOULD first seek the trust root +// for a suitable instance before creating a per-artifact trust root (that +// is, a subset of the complete trust root) that is used for verification.
+type TrustedRoot struct { + state protoimpl.MessageState `protogen:"open.v1"` + // MUST be application/vnd.dev.sigstore.trustedroot.v0.2+json + // when encoded as JSON. + // Clients MAY choose to also support + // application/vnd.dev.sigstore.trustedroot.v0.1+json + // Clients MAY process and parse content with the media type defined + // in the old format: + // application/vnd.dev.sigstore.trustedroot+json;version=0.1 + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // A set of trusted Rekor servers. + Tlogs []*TransparencyLogInstance `protobuf:"bytes,2,rep,name=tlogs,proto3" json:"tlogs,omitempty"` + // A set of trusted certificate authorities (e.g. Fulcio), and any + // intermediate certificates they provide. + // If a CA issues multiple intermediate certificates, each + // combination shall be represented as a separate chain. I.e., a single + // root cert may appear in multiple chains but with different + // intermediate and/or leaf certificates. + // The certificates are intended to be used for verifying artifact + // signatures. + CertificateAuthorities []*CertificateAuthority `protobuf:"bytes,3,rep,name=certificate_authorities,json=certificateAuthorities,proto3" json:"certificate_authorities,omitempty"` + // A set of trusted certificate transparency logs. + Ctlogs []*TransparencyLogInstance `protobuf:"bytes,4,rep,name=ctlogs,proto3" json:"ctlogs,omitempty"` + // A set of trusted timestamping authorities. + TimestampAuthorities []*CertificateAuthority `protobuf:"bytes,5,rep,name=timestamp_authorities,json=timestampAuthorities,proto3" json:"timestamp_authorities,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TrustedRoot) Reset() { + *x = TrustedRoot{} + mi := &file_sigstore_trustroot_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TrustedRoot) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TrustedRoot) ProtoMessage() {} + +func (x *TrustedRoot) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TrustedRoot.ProtoReflect.Descriptor instead. +func (*TrustedRoot) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{2} +} + +func (x *TrustedRoot) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *TrustedRoot) GetTlogs() []*TransparencyLogInstance { + if x != nil { + return x.Tlogs + } + return nil +} + +func (x *TrustedRoot) GetCertificateAuthorities() []*CertificateAuthority { + if x != nil { + return x.CertificateAuthorities + } + return nil +} + +func (x *TrustedRoot) GetCtlogs() []*TransparencyLogInstance { + if x != nil { + return x.Ctlogs + } + return nil +} + +func (x *TrustedRoot) GetTimestampAuthorities() []*CertificateAuthority { + if x != nil { + return x.TimestampAuthorities + } + return nil +} + +// SigningConfig represents the trusted entities/state needed by Sigstore +// signing. In particular, it primarily contains service URLs that a Sigstore +// signer may need to connect to for the online aspects of signing.
+type SigningConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + // MUST be application/vnd.dev.sigstore.signingconfig.v0.2+json + // Clients MAY choose to also support + // application/vnd.dev.sigstore.signingconfig.v0.1+json + MediaType string `protobuf:"bytes,5,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // URLs to Fulcio-compatible CAs, capable of receiving + // Certificate Signing Requests (CSRs) and responding with + // issued certificates. + // + // These URLs MUST be the "base" URLs for the CAs, which clients + // should construct an appropriate CSR endpoint on top of. + // For example, if a CA URL is `https://example.com/ca`, then + // the client MAY construct the CSR endpoint as + // `https://example.com/ca/api/v2/signingCert`. + // + // Clients MUST select only one Service with the highest API version + // that the client is compatible with, that is within its + // validity period, and has the newest validity start date. + // Clients SHOULD select the first Service that meets this requirement. + // All listed Services SHOULD be sorted by the `valid_for` window in + // descending order, with the newest instance first. + CaUrls []*Service `protobuf:"bytes,6,rep,name=ca_urls,json=caUrls,proto3" json:"ca_urls,omitempty"` + // URLs to OpenID Connect identity providers. + // + // These URLs MUST be the "base" URLs for the OIDC IdPs, which clients + // should perform well-known OpenID Connect discovery against. + // + // Clients MUST select only one Service with the highest API version + // that the client is compatible with, that is within its + // validity period, and has the newest validity start date. + // Clients SHOULD select the first Service that meets this requirement. + // All listed Services SHOULD be sorted by the `valid_for` window in + // descending order, with the newest instance first. + OidcUrls []*Service `protobuf:"bytes,7,rep,name=oidc_urls,json=oidcUrls,proto3" json:"oidc_urls,omitempty"` + // URLs to Rekor transparency logs. + // + // These URLs MUST be the "base" URLs for the transparency logs, + // which clients should construct appropriate API endpoints on top of. + // + // Clients MUST group Services by `operator` and select at most one + // Service from each operator. Clients MUST select Services with the + // highest API version that the client is compatible with, that are + // within its validity period, and have the newest validity start dates. + // All listed Services SHOULD be sorted by the `valid_for` window in + // descending order, with the newest instance first. + // + // Clients MUST select Services based on the selector value of + // `rekor_tlog_config`. + RekorTlogUrls []*Service `protobuf:"bytes,8,rep,name=rekor_tlog_urls,json=rekorTlogUrls,proto3" json:"rekor_tlog_urls,omitempty"` + // Specifies how a client should select the set of Rekor transparency + // logs to write to. + RekorTlogConfig *ServiceConfiguration `protobuf:"bytes,9,opt,name=rekor_tlog_config,json=rekorTlogConfig,proto3" json:"rekor_tlog_config,omitempty"` + // URLs to RFC 3161 Time Stamping Authorities (TSA). + // + // These URLs MUST be the *full* URLs for the TSAs, meaning that each + // should be suitable for submitting Time Stamp Requests (TSRs) + // via HTTP, per RFC 3161. + // + // Clients MUST group Services by `operator` and select at most one + // Service from each operator.
Clients MUST select Services with the + // highest API version that the client is compatible with, that are + // within its validity period, and have the newest validity start dates. + // All listed Services SHOULD be sorted by the `valid_for` window in + // descending order, with the newest instance first. + // + // Clients MUST select Services based on the selector value of + // `tsa_config`. + TsaUrls []*Service `protobuf:"bytes,10,rep,name=tsa_urls,json=tsaUrls,proto3" json:"tsa_urls,omitempty"` + // Specifies how a client should select the set of TSAs to request + // signed timestamps from. + TsaConfig *ServiceConfiguration `protobuf:"bytes,11,opt,name=tsa_config,json=tsaConfig,proto3" json:"tsa_config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SigningConfig) Reset() { + *x = SigningConfig{} + mi := &file_sigstore_trustroot_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SigningConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SigningConfig) ProtoMessage() {} + +func (x *SigningConfig) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SigningConfig.ProtoReflect.Descriptor instead. +func (*SigningConfig) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{3} +} + +func (x *SigningConfig) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *SigningConfig) GetCaUrls() []*Service { + if x != nil { + return x.CaUrls + } + return nil +} + +func (x *SigningConfig) GetOidcUrls() []*Service { + if x != nil { + return x.OidcUrls + } + return nil +} + +func (x *SigningConfig) GetRekorTlogUrls() []*Service { + if x != nil { + return x.RekorTlogUrls + } + return nil +} + +func (x *SigningConfig) GetRekorTlogConfig() *ServiceConfiguration { + if x != nil { + return x.RekorTlogConfig + } + return nil +} + +func (x *SigningConfig) GetTsaUrls() []*Service { + if x != nil { + return x.TsaUrls + } + return nil +} + +func (x *SigningConfig) GetTsaConfig() *ServiceConfiguration { + if x != nil { + return x.TsaConfig + } + return nil +} + +// Service represents an instance of a service that is a part of Sigstore infrastructure. +// When selecting one or multiple services from a list of services, clients MUST: +// - Use the API version hint to determine the service with the highest API version +// that the client is compatible with. +// - Only select services within the specified validity period and that have the +// newest validity start date. +// +// When selecting multiple services, clients MUST: +// - Use the ServiceConfiguration to determine how many services MUST be selected. +// Clients MUST return an error if there are not enough services that meet the +// selection criteria. +// - Group services by `operator` and select at most one service from an operator. +// During verification, clients MUST treat valid verification metadata from the +// operator as valid only once towards a threshold. +// - Select services from only the highest supported API version. +type Service struct { + state protoimpl.MessageState `protogen:"open.v1"` + // URL of the service. MUST include scheme and authority. MAY include path. 
+ Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + // Specifies the major API version. A value of 0 represents a service that + // has not yet been released. + MajorApiVersion uint32 `protobuf:"varint,2,opt,name=major_api_version,json=majorApiVersion,proto3" json:"major_api_version,omitempty"` + // Validity period of a service. A service that has only a start date + // SHOULD be considered the most recent instance of that service, but + // the client MUST NOT assume there is only one valid instance. + // The TimeRange MUST be considered valid *inclusive* of the + // endpoints. + ValidFor *v1.TimeRange `protobuf:"bytes,3,opt,name=valid_for,json=validFor,proto3" json:"valid_for,omitempty"` + // Specifies the name of the service operator. When selecting multiple + // services, clients MUST use the operator to select services from + // distinct operators. Operator MUST be formatted as a scheme-less + // URI, e.g. sigstore.dev + Operator string `protobuf:"bytes,4,opt,name=operator,proto3" json:"operator,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Service) Reset() { + *x = Service{} + mi := &file_sigstore_trustroot_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Service) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service) ProtoMessage() {} + +func (x *Service) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service.ProtoReflect.Descriptor instead. +func (*Service) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{4} +} + +func (x *Service) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Service) GetMajorApiVersion() uint32 { + if x != nil { + return x.MajorApiVersion + } + return 0 +} + +func (x *Service) GetValidFor() *v1.TimeRange { + if x != nil { + return x.ValidFor + } + return nil +} + +func (x *Service) GetOperator() string { + if x != nil { + return x.Operator + } + return "" +} + +// ServiceConfiguration specifies how a client should select a set of +// Services to connect to, along with a count when a specific number +// of Services is requested. +type ServiceConfiguration struct { + state protoimpl.MessageState `protogen:"open.v1"` + // How a client should select a set of Services to connect to. + // Clients SHOULD NOT select services from multiple API versions. + Selector ServiceSelector `protobuf:"varint,1,opt,name=selector,proto3,enum=dev.sigstore.trustroot.v1.ServiceSelector" json:"selector,omitempty"` + // count specifies the number of Services the client should use. + // Only used when selector is set to EXACT, and count MUST be greater + // than 0. count MUST be less than or equal to the number of Services. + // Clients MUST return an error if there are not enough services + // that meet the selection criteria.
+ Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ServiceConfiguration) Reset() { + *x = ServiceConfiguration{} + mi := &file_sigstore_trustroot_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ServiceConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceConfiguration) ProtoMessage() {} + +func (x *ServiceConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceConfiguration.ProtoReflect.Descriptor instead. +func (*ServiceConfiguration) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{5} +} + +func (x *ServiceConfiguration) GetSelector() ServiceSelector { + if x != nil { + return x.Selector + } + return ServiceSelector_SERVICE_SELECTOR_UNDEFINED +} + +func (x *ServiceConfiguration) GetCount() uint32 { + if x != nil { + return x.Count + } + return 0 +} + +// ClientTrustConfig describes the complete state needed by a client +// to perform both signing and verification operations against a particular +// instance of Sigstore. +type ClientTrustConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + // MUST be application/vnd.dev.sigstore.clienttrustconfig.v0.1+json + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // The root of trust, which MUST be present. + TrustedRoot *TrustedRoot `protobuf:"bytes,2,opt,name=trusted_root,json=trustedRoot,proto3" json:"trusted_root,omitempty"` + // Configuration for signing clients, which MUST be present. + SigningConfig *SigningConfig `protobuf:"bytes,3,opt,name=signing_config,json=signingConfig,proto3" json:"signing_config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ClientTrustConfig) Reset() { + *x = ClientTrustConfig{} + mi := &file_sigstore_trustroot_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ClientTrustConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientTrustConfig) ProtoMessage() {} + +func (x *ClientTrustConfig) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientTrustConfig.ProtoReflect.Descriptor instead. 
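Taken together, the selection rules documented on SigningConfig, Service, and ServiceConfiguration above reduce to: filter candidates to those inside their validity window and with a supported major API version, group by operator, and keep the highest-versioned entry (newest start date as a tiebreak) per operator. The Go sketch below illustrates that reduction under simplified, hypothetical types (the candidate struct and selectPerOperator function are not part of the generated API, and the EXACT/count selector is omitted for brevity).

package selection

import "time"

// candidate is a simplified, hypothetical stand-in for the generated
// Service message: a URL plus version, operator, and validity window.
type candidate struct {
	URL      string
	Major    uint32
	Operator string
	Start    time.Time
	End      *time.Time // nil: the validity window is still open
}

// selectPerOperator keeps at most one usable candidate per operator,
// preferring the highest supported API version and, on ties, the newest
// validity start date, per the comments above.
func selectPerOperator(svcs []candidate, supported map[uint32]bool, now time.Time) []candidate {
	best := map[string]candidate{}
	for _, s := range svcs {
		if !supported[s.Major] || now.Before(s.Start) || (s.End != nil && now.After(*s.End)) {
			continue // unsupported API version, or outside the valid_for window
		}
		cur, ok := best[s.Operator]
		if !ok || s.Major > cur.Major || (s.Major == cur.Major && s.Start.After(cur.Start)) {
			best[s.Operator] = s
		}
	}
	out := make([]candidate, 0, len(best))
	for _, s := range best {
		out = append(out, s)
	}
	return out
}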
+func (*ClientTrustConfig) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{6} +} + +func (x *ClientTrustConfig) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *ClientTrustConfig) GetTrustedRoot() *TrustedRoot { + if x != nil { + return x.TrustedRoot + } + return nil +} + +func (x *ClientTrustConfig) GetSigningConfig() *SigningConfig { + if x != nil { + return x.SigningConfig + } + return nil +} + +var File_sigstore_trustroot_proto protoreflect.FileDescriptor + +var file_sigstore_trustroot_proto_rawDesc = string([]byte{ + 0x0a, 0x18, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x72, 0x75, 0x73, 0x74, + 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, + 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe1, 0x02, + 0x0a, 0x17, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, + 0x67, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x61, 0x73, + 0x65, 0x55, 0x72, 0x6c, 0x12, 0x4c, 0x0a, 0x0e, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x61, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x52, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, + 0x68, 0x6d, 0x12, 0x40, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, + 0x67, 0x49, 0x64, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x11, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, + 0x6f, 0x67, 0x49, 0x64, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, + 0x72, 0x22, 0x96, 0x02, 0x0a, 0x14, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 
0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x07, 0x73, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x64, 0x65, + 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x75, 0x69, 0x73, 0x68, + 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x69, 0x12, 0x4b, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58, + 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x52, 0x09, 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x3e, + 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x6f, 0x72, 0x12, 0x1a, + 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x22, 0x92, 0x03, 0x0a, 0x0b, 0x54, + 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, + 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x48, 0x0a, 0x05, 0x74, 0x6c, 0x6f, + 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, + 0x79, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x74, 0x6c, + 0x6f, 0x67, 0x73, 0x12, 0x68, 0x0a, 0x17, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x4a, 0x0a, + 0x06, 0x63, 0x74, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, + 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x52, 0x06, 0x63, 0x74, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x64, 0x0a, 0x15, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 
0x6d, 0x70, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x14, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, + 0xea, 0x03, 0x0a, 0x0d, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x3b, 0x0a, 0x07, 0x63, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x63, 0x61, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x3f, 0x0a, + 0x09, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x6f, 0x69, 0x64, 0x63, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x4a, + 0x0a, 0x0f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x5f, 0x74, 0x6c, 0x6f, 0x67, 0x5f, 0x75, 0x72, 0x6c, + 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x6b, + 0x6f, 0x72, 0x54, 0x6c, 0x6f, 0x67, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x5b, 0x0a, 0x11, 0x72, 0x65, + 0x6b, 0x6f, 0x72, 0x5f, 0x74, 0x6c, 0x6f, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x54, 0x6c, 0x6f, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x73, 0x61, 0x5f, 0x75, + 0x72, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, + 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, 0x74, + 0x73, 0x61, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0a, 0x74, 0x73, 0x61, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, + 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, + 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x74, 0x73, 0x61, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 
0x08, 0x01, 0x10, 0x05, 0x22, 0xa3, 0x01, 0x0a, + 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x6d, 0x61, + 0x6a, 0x6f, 0x72, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x41, 0x70, 0x69, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, + 0x66, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x46, 0x6f, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x22, 0x74, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x08, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xd8, 0x01, 0x0a, 0x11, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, + 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4e, 0x0a, + 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, + 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0b, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x54, 0x0a, + 0x0e, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2a, 0x4e, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, + 0x45, 0x5f, 0x53, 0x45, 0x4c, 0x45, 0x43, 0x54, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, + 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x12, + 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 
0x45, 0x58, 0x41, 0x43, + 0x54, 0x10, 0x03, 0x42, 0x88, 0x01, 0x0a, 0x1f, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, + 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x52, 0x6f, + 0x6f, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, + 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, + 0x74, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x17, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, + 0x3a, 0x54, 0x72, 0x75, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_sigstore_trustroot_proto_rawDescOnce sync.Once + file_sigstore_trustroot_proto_rawDescData []byte +) + +func file_sigstore_trustroot_proto_rawDescGZIP() []byte { + file_sigstore_trustroot_proto_rawDescOnce.Do(func() { + file_sigstore_trustroot_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sigstore_trustroot_proto_rawDesc), len(file_sigstore_trustroot_proto_rawDesc))) + }) + return file_sigstore_trustroot_proto_rawDescData +} + +var file_sigstore_trustroot_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_sigstore_trustroot_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_sigstore_trustroot_proto_goTypes = []any{ + (ServiceSelector)(0), // 0: dev.sigstore.trustroot.v1.ServiceSelector + (*TransparencyLogInstance)(nil), // 1: dev.sigstore.trustroot.v1.TransparencyLogInstance + (*CertificateAuthority)(nil), // 2: dev.sigstore.trustroot.v1.CertificateAuthority + (*TrustedRoot)(nil), // 3: dev.sigstore.trustroot.v1.TrustedRoot + (*SigningConfig)(nil), // 4: dev.sigstore.trustroot.v1.SigningConfig + (*Service)(nil), // 5: dev.sigstore.trustroot.v1.Service + (*ServiceConfiguration)(nil), // 6: dev.sigstore.trustroot.v1.ServiceConfiguration + (*ClientTrustConfig)(nil), // 7: dev.sigstore.trustroot.v1.ClientTrustConfig + (v1.HashAlgorithm)(0), // 8: dev.sigstore.common.v1.HashAlgorithm + (*v1.PublicKey)(nil), // 9: dev.sigstore.common.v1.PublicKey + (*v1.LogId)(nil), // 10: dev.sigstore.common.v1.LogId + (*v1.DistinguishedName)(nil), // 11: dev.sigstore.common.v1.DistinguishedName + (*v1.X509CertificateChain)(nil), // 12: dev.sigstore.common.v1.X509CertificateChain + (*v1.TimeRange)(nil), // 13: dev.sigstore.common.v1.TimeRange +} +var file_sigstore_trustroot_proto_depIdxs = []int32{ + 8, // 0: dev.sigstore.trustroot.v1.TransparencyLogInstance.hash_algorithm:type_name -> dev.sigstore.common.v1.HashAlgorithm + 9, // 1: dev.sigstore.trustroot.v1.TransparencyLogInstance.public_key:type_name -> dev.sigstore.common.v1.PublicKey + 10, // 2: dev.sigstore.trustroot.v1.TransparencyLogInstance.log_id:type_name -> dev.sigstore.common.v1.LogId + 10, // 3: dev.sigstore.trustroot.v1.TransparencyLogInstance.checkpoint_key_id:type_name -> dev.sigstore.common.v1.LogId + 11, // 4: dev.sigstore.trustroot.v1.CertificateAuthority.subject:type_name -> dev.sigstore.common.v1.DistinguishedName + 12, // 5: dev.sigstore.trustroot.v1.CertificateAuthority.cert_chain:type_name -> dev.sigstore.common.v1.X509CertificateChain + 13, // 6: dev.sigstore.trustroot.v1.CertificateAuthority.valid_for:type_name -> 
dev.sigstore.common.v1.TimeRange + 1, // 7: dev.sigstore.trustroot.v1.TrustedRoot.tlogs:type_name -> dev.sigstore.trustroot.v1.TransparencyLogInstance + 2, // 8: dev.sigstore.trustroot.v1.TrustedRoot.certificate_authorities:type_name -> dev.sigstore.trustroot.v1.CertificateAuthority + 1, // 9: dev.sigstore.trustroot.v1.TrustedRoot.ctlogs:type_name -> dev.sigstore.trustroot.v1.TransparencyLogInstance + 2, // 10: dev.sigstore.trustroot.v1.TrustedRoot.timestamp_authorities:type_name -> dev.sigstore.trustroot.v1.CertificateAuthority + 5, // 11: dev.sigstore.trustroot.v1.SigningConfig.ca_urls:type_name -> dev.sigstore.trustroot.v1.Service + 5, // 12: dev.sigstore.trustroot.v1.SigningConfig.oidc_urls:type_name -> dev.sigstore.trustroot.v1.Service + 5, // 13: dev.sigstore.trustroot.v1.SigningConfig.rekor_tlog_urls:type_name -> dev.sigstore.trustroot.v1.Service + 6, // 14: dev.sigstore.trustroot.v1.SigningConfig.rekor_tlog_config:type_name -> dev.sigstore.trustroot.v1.ServiceConfiguration + 5, // 15: dev.sigstore.trustroot.v1.SigningConfig.tsa_urls:type_name -> dev.sigstore.trustroot.v1.Service + 6, // 16: dev.sigstore.trustroot.v1.SigningConfig.tsa_config:type_name -> dev.sigstore.trustroot.v1.ServiceConfiguration + 13, // 17: dev.sigstore.trustroot.v1.Service.valid_for:type_name -> dev.sigstore.common.v1.TimeRange + 0, // 18: dev.sigstore.trustroot.v1.ServiceConfiguration.selector:type_name -> dev.sigstore.trustroot.v1.ServiceSelector + 3, // 19: dev.sigstore.trustroot.v1.ClientTrustConfig.trusted_root:type_name -> dev.sigstore.trustroot.v1.TrustedRoot + 4, // 20: dev.sigstore.trustroot.v1.ClientTrustConfig.signing_config:type_name -> dev.sigstore.trustroot.v1.SigningConfig + 21, // [21:21] is the sub-list for method output_type + 21, // [21:21] is the sub-list for method input_type + 21, // [21:21] is the sub-list for extension type_name + 21, // [21:21] is the sub-list for extension extendee + 0, // [0:21] is the sub-list for field type_name +} + +func init() { file_sigstore_trustroot_proto_init() } +func file_sigstore_trustroot_proto_init() { + if File_sigstore_trustroot_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_sigstore_trustroot_proto_rawDesc), len(file_sigstore_trustroot_proto_rawDesc)), + NumEnums: 1, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sigstore_trustroot_proto_goTypes, + DependencyIndexes: file_sigstore_trustroot_proto_depIdxs, + EnumInfos: file_sigstore_trustroot_proto_enumTypes, + MessageInfos: file_sigstore_trustroot_proto_msgTypes, + }.Build() + File_sigstore_trustroot_proto = out.File + file_sigstore_trustroot_proto_goTypes = nil + file_sigstore_trustroot_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/COPYRIGHT.txt b/vendor/github.com/sigstore/rekor-tiles/v2/COPYRIGHT.txt new file mode 100644 index 00000000000..8bb896e7f90 --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/COPYRIGHT.txt @@ -0,0 +1,13 @@ +Copyright 2025 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/LICENSE b/vendor/github.com/sigstore/rekor-tiles/v2/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/internal/safeint/safeint.go b/vendor/github.com/sigstore/rekor-tiles/v2/internal/safeint/safeint.go new file mode 100644 index 00000000000..bb28b196acb --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/internal/safeint/safeint.go @@ -0,0 +1,68 @@ +// +// Copyright 2025 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package safeint + +import ( + "fmt" + "math" +) + +// SafeInt64 holds equivalent int64 and uint64 integers. +type SafeInt64 struct { + u uint64 + i int64 +} + +// NewSafeInt64 returns a SafeInt64 struct as long as the number is either an +// int64 or uint64 and the value can safely be converted in either direction +// without overflowing, i.e. is not greater than MaxInt64 and not negative. +// +// This has implications for its usage, e.g. when used for the tree size, a new +// tree must be created to replace the old tree before its size reaches +// math.MaxInt64. +// +// This is needed for compatibility with TransparencyLogEntry +// (https://github.com/sigstore/protobuf-specs/blob/e871d3e6fd06fa73a1524ef0efaf1452d3304cf6/protos/sigstore_rekor.proto#L86-L138). +func NewSafeInt64(number any) (*SafeInt64, error) { + var result SafeInt64 + switch n := number.(type) { + case uint64: + if n > math.MaxInt64 { + return nil, fmt.Errorf("exceeded max int64: %d", n) + } + result.u = n + result.i = int64(n) //nolint:gosec + case int64: + if n < 0 { + return nil, fmt.Errorf("negative integer: %d", n) + } + result.u = uint64(n) //nolint:gosec + result.i = n + default: + return nil, fmt.Errorf("only uint64 and int64 are supported") + } + return &result, nil +} + +// U returns the uint64 value of the integer. +func (s *SafeInt64) U() uint64 { + return s.u +} + +// I returns the int64 value of the integer.
+func (s *SafeInt64) I() int64 { + return s.i +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/dsse.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/dsse.pb.go new file mode 100644 index 00000000000..e243eabb932 --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/dsse.pb.go @@ -0,0 +1,248 @@ +// Copyright 2025 The Sigstore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: rekor/v2/dsse.proto + +package protobuf + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + dsse "github.com/sigstore/protobuf-specs/gen/pb-go/dsse" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A request to add a DSSE v0.0.2 entry to the log +type DSSERequestV002 struct { + state protoimpl.MessageState `protogen:"open.v1"` + // A DSSE envelope + Envelope *dsse.Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` + // All necessary verification material to verify all signatures embedded in the envelope + Verifiers []*Verifier `protobuf:"bytes,2,rep,name=verifiers,proto3" json:"verifiers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DSSERequestV002) Reset() { + *x = DSSERequestV002{} + mi := &file_rekor_v2_dsse_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DSSERequestV002) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DSSERequestV002) ProtoMessage() {} + +func (x *DSSERequestV002) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_dsse_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DSSERequestV002.ProtoReflect.Descriptor instead. 
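The safeint package vendored above exists to guard the uint64-to-int64 bridge that TransparencyLogEntry's int64 fields force on tiles-based logs, which track tree sizes as uint64. A minimal usage sketch follows (the treeSize value is hypothetical; note that internal/safeint is an internal package, so this only compiles inside the rekor-tiles module itself).

package main

import (
	"fmt"

	"github.com/sigstore/rekor-tiles/v2/internal/safeint"
)

func main() {
	var treeSize uint64 = 123456 // hypothetical checkpoint tree size
	s, err := safeint.NewSafeInt64(treeSize)
	if err != nil {
		panic(err) // returned for values above math.MaxInt64 (or negative int64 input)
	}
	// The same value is now usable for both the uint64 tiles API and the
	// int64 TransparencyLogEntry fields.
	fmt.Println(s.U(), s.I())
}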
+func (*DSSERequestV002) Descriptor() ([]byte, []int) { + return file_rekor_v2_dsse_proto_rawDescGZIP(), []int{0} +} + +func (x *DSSERequestV002) GetEnvelope() *dsse.Envelope { + if x != nil { + return x.Envelope + } + return nil +} + +func (x *DSSERequestV002) GetVerifiers() []*Verifier { + if x != nil { + return x.Verifiers + } + return nil +} + +type DSSELogEntryV002 struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The hash of the DSSE payload + PayloadHash *v1.HashOutput `protobuf:"bytes,1,opt,name=payloadHash,proto3" json:"payloadHash,omitempty"` + // Signatures and their associated verification material used to verify the payload + Signatures []*Signature `protobuf:"bytes,2,rep,name=signatures,proto3" json:"signatures,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DSSELogEntryV002) Reset() { + *x = DSSELogEntryV002{} + mi := &file_rekor_v2_dsse_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DSSELogEntryV002) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DSSELogEntryV002) ProtoMessage() {} + +func (x *DSSELogEntryV002) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_dsse_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DSSELogEntryV002.ProtoReflect.Descriptor instead. +func (*DSSELogEntryV002) Descriptor() ([]byte, []int) { + return file_rekor_v2_dsse_proto_rawDescGZIP(), []int{1} +} + +func (x *DSSELogEntryV002) GetPayloadHash() *v1.HashOutput { + if x != nil { + return x.PayloadHash + } + return nil +} + +func (x *DSSELogEntryV002) GetSignatures() []*Signature { + if x != nil { + return x.Signatures + } + return nil +} + +var File_rekor_v2_dsse_proto protoreflect.FileDescriptor + +var file_rekor_v2_dsse_proto_rawDesc = string([]byte{ + 0x0a, 0x13, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x64, 0x73, 0x73, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, + 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8b, 0x01, + 0x0a, 0x0f, 0x44, 0x53, 0x53, 0x45, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, + 0x32, 0x12, 0x34, 0x0a, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x74, 0x6f, 0x2e, + 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x65, + 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x42, 0x0a, 0x09, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x64, 0x65, 0x76, + 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 
0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, + 0x76, 0x32, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x09, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x22, 0xa4, 0x01, 0x0a, 0x10, + 0x44, 0x53, 0x53, 0x45, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x56, 0x30, 0x30, 0x32, + 0x12, 0x49, 0x0a, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x61, 0x73, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x45, 0x0a, 0x0a, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, + 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x42, 0x7d, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, + 0x32, 0x42, 0x0b, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x44, 0x73, 0x73, 0x65, 0x50, 0x01, + 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c, 0x65, + 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0xea, 0x02, 0x13, 0x53, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x3a, 0x3a, 0x56, + 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_rekor_v2_dsse_proto_rawDescOnce sync.Once + file_rekor_v2_dsse_proto_rawDescData []byte +) + +func file_rekor_v2_dsse_proto_rawDescGZIP() []byte { + file_rekor_v2_dsse_proto_rawDescOnce.Do(func() { + file_rekor_v2_dsse_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_dsse_proto_rawDesc), len(file_rekor_v2_dsse_proto_rawDesc))) + }) + return file_rekor_v2_dsse_proto_rawDescData +} + +var file_rekor_v2_dsse_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_rekor_v2_dsse_proto_goTypes = []any{ + (*DSSERequestV002)(nil), // 0: dev.sigstore.rekor.v2.DSSERequestV002 + (*DSSELogEntryV002)(nil), // 1: dev.sigstore.rekor.v2.DSSELogEntryV002 + (*dsse.Envelope)(nil), // 2: io.intoto.Envelope + (*Verifier)(nil), // 3: dev.sigstore.rekor.v2.Verifier + (*v1.HashOutput)(nil), // 4: dev.sigstore.common.v1.HashOutput + (*Signature)(nil), // 5: dev.sigstore.rekor.v2.Signature +} +var file_rekor_v2_dsse_proto_depIdxs = []int32{ + 2, // 0: dev.sigstore.rekor.v2.DSSERequestV002.envelope:type_name -> io.intoto.Envelope + 3, // 1: dev.sigstore.rekor.v2.DSSERequestV002.verifiers:type_name -> dev.sigstore.rekor.v2.Verifier + 4, // 2: dev.sigstore.rekor.v2.DSSELogEntryV002.payloadHash:type_name -> dev.sigstore.common.v1.HashOutput + 5, // 3: dev.sigstore.rekor.v2.DSSELogEntryV002.signatures:type_name -> dev.sigstore.rekor.v2.Signature + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method 
input_type
+	4, // [4:4] is the sub-list for extension type_name
+	4, // [4:4] is the sub-list for extension extendee
+	0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_rekor_v2_dsse_proto_init() }
+func file_rekor_v2_dsse_proto_init() {
+	if File_rekor_v2_dsse_proto != nil {
+		return
+	}
+	file_rekor_v2_verifier_proto_init()
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_dsse_proto_rawDesc), len(file_rekor_v2_dsse_proto_rawDesc)),
+			NumEnums:      0,
+			NumMessages:   2,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_rekor_v2_dsse_proto_goTypes,
+		DependencyIndexes: file_rekor_v2_dsse_proto_depIdxs,
+		MessageInfos:      file_rekor_v2_dsse_proto_msgTypes,
+	}.Build()
+	File_rekor_v2_dsse_proto = out.File
+	file_rekor_v2_dsse_proto_goTypes = nil
+	file_rekor_v2_dsse_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/entry.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/entry.pb.go
new file mode 100644
index 00000000000..fded303ae7a
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/entry.pb.go
@@ -0,0 +1,395 @@
+// Copyright 2025 The Sigstore Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.36.5
+// 	protoc        v6.30.2
+// source: rekor/v2/entry.proto
+
+package protobuf
+
+import (
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+	unsafe "unsafe"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Entry is the message that is canonicalized and uploaded to the log.
+// This format is meant to be compliant with Rekor v1 entries in that
+// the `apiVersion` and `kind` can be parsed before parsing the spec.
+// Clients are expected to understand and handle the differences in the
+// contents of `spec` between Rekor v1 (a polymorphic OpenAPI definition)
+// and Rekor v2 (a typed proto definition).
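+//
+// For orientation only, an editor's sketch of the canonicalized JSON shape
+// (assuming proto3 JSON serialization; not a normative upstream example):
+//
+//	{
+//	  "kind": "hashedrekord",
+//	  "apiVersion": "0.0.2",
+//	  "spec": {"hashedRekordV002": {...}}
+//	}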
+type Entry struct { + state protoimpl.MessageState `protogen:"open.v1"` + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + ApiVersion string `protobuf:"bytes,2,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"` + Spec *Spec `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Entry) Reset() { + *x = Entry{} + mi := &file_rekor_v2_entry_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Entry) ProtoMessage() {} + +func (x *Entry) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_entry_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Entry.ProtoReflect.Descriptor instead. +func (*Entry) Descriptor() ([]byte, []int) { + return file_rekor_v2_entry_proto_rawDescGZIP(), []int{0} +} + +func (x *Entry) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (x *Entry) GetApiVersion() string { + if x != nil { + return x.ApiVersion + } + return "" +} + +func (x *Entry) GetSpec() *Spec { + if x != nil { + return x.Spec + } + return nil +} + +// Spec contains one of the Rekor entry types. +type Spec struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Spec: + // + // *Spec_HashedRekordV002 + // *Spec_DsseV002 + Spec isSpec_Spec `protobuf_oneof:"spec"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Spec) Reset() { + *x = Spec{} + mi := &file_rekor_v2_entry_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Spec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Spec) ProtoMessage() {} + +func (x *Spec) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_entry_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Spec.ProtoReflect.Descriptor instead. 
+func (*Spec) Descriptor() ([]byte, []int) { + return file_rekor_v2_entry_proto_rawDescGZIP(), []int{1} +} + +func (x *Spec) GetSpec() isSpec_Spec { + if x != nil { + return x.Spec + } + return nil +} + +func (x *Spec) GetHashedRekordV002() *HashedRekordLogEntryV002 { + if x != nil { + if x, ok := x.Spec.(*Spec_HashedRekordV002); ok { + return x.HashedRekordV002 + } + } + return nil +} + +func (x *Spec) GetDsseV002() *DSSELogEntryV002 { + if x != nil { + if x, ok := x.Spec.(*Spec_DsseV002); ok { + return x.DsseV002 + } + } + return nil +} + +type isSpec_Spec interface { + isSpec_Spec() +} + +type Spec_HashedRekordV002 struct { + HashedRekordV002 *HashedRekordLogEntryV002 `protobuf:"bytes,1,opt,name=hashed_rekord_v002,json=hashedRekordV002,proto3,oneof"` +} + +type Spec_DsseV002 struct { + DsseV002 *DSSELogEntryV002 `protobuf:"bytes,2,opt,name=dsse_v002,json=dsseV002,proto3,oneof"` +} + +func (*Spec_HashedRekordV002) isSpec_Spec() {} + +func (*Spec_DsseV002) isSpec_Spec() {} + +// Create a new HashedRekord or DSSE +type CreateEntryRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Spec: + // + // *CreateEntryRequest_HashedRekordRequestV002 + // *CreateEntryRequest_DsseRequestV002 + Spec isCreateEntryRequest_Spec `protobuf_oneof:"spec"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateEntryRequest) Reset() { + *x = CreateEntryRequest{} + mi := &file_rekor_v2_entry_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateEntryRequest) ProtoMessage() {} + +func (x *CreateEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_entry_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateEntryRequest.ProtoReflect.Descriptor instead. 
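+//
+// Editor's sketch (hypothetical caller code; "hr" is a previously built
+// *HashedRekordRequestV002) of populating the request oneof:
+//
+//	req := &CreateEntryRequest{
+//		Spec: &CreateEntryRequest_HashedRekordRequestV002{
+//			HashedRekordRequestV002: hr,
+//		},
+//	}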
+func (*CreateEntryRequest) Descriptor() ([]byte, []int) { + return file_rekor_v2_entry_proto_rawDescGZIP(), []int{2} +} + +func (x *CreateEntryRequest) GetSpec() isCreateEntryRequest_Spec { + if x != nil { + return x.Spec + } + return nil +} + +func (x *CreateEntryRequest) GetHashedRekordRequestV002() *HashedRekordRequestV002 { + if x != nil { + if x, ok := x.Spec.(*CreateEntryRequest_HashedRekordRequestV002); ok { + return x.HashedRekordRequestV002 + } + } + return nil +} + +func (x *CreateEntryRequest) GetDsseRequestV002() *DSSERequestV002 { + if x != nil { + if x, ok := x.Spec.(*CreateEntryRequest_DsseRequestV002); ok { + return x.DsseRequestV002 + } + } + return nil +} + +type isCreateEntryRequest_Spec interface { + isCreateEntryRequest_Spec() +} + +type CreateEntryRequest_HashedRekordRequestV002 struct { + HashedRekordRequestV002 *HashedRekordRequestV002 `protobuf:"bytes,1,opt,name=hashed_rekord_request_v002,json=hashedRekordRequestV002,proto3,oneof"` +} + +type CreateEntryRequest_DsseRequestV002 struct { + DsseRequestV002 *DSSERequestV002 `protobuf:"bytes,2,opt,name=dsse_request_v002,json=dsseRequestV002,proto3,oneof"` +} + +func (*CreateEntryRequest_HashedRekordRequestV002) isCreateEntryRequest_Spec() {} + +func (*CreateEntryRequest_DsseRequestV002) isCreateEntryRequest_Spec() {} + +var File_rekor_v2_entry_proto protoreflect.FileDescriptor + +var file_rekor_v2_entry_proto_rawDesc = string([]byte{ + 0x0a, 0x14, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x13, + 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x64, 0x73, 0x73, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x68, 0x61, + 0x73, 0x68, 0x65, 0x64, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x7c, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x04, 0x6b, 0x69, 0x6e, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6b, 0x69, + 0x6e, 0x64, 0x12, 0x24, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x61, 0x70, + 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x22, 0xc1, + 0x01, 0x0a, 0x04, 0x53, 0x70, 0x65, 0x63, 0x12, 0x64, 0x0a, 0x12, 0x68, 0x61, 0x73, 0x68, 0x65, + 0x64, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x5f, 0x76, 0x30, 0x30, 0x32, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x61, 0x73, 0x68, + 0x65, 0x64, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x56, 0x30, 0x30, 0x32, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 
0x52, 0x10, 0x68, 0x61, 0x73, + 0x68, 0x65, 0x64, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x56, 0x30, 0x30, 0x32, 0x12, 0x4b, 0x0a, + 0x09, 0x64, 0x73, 0x73, 0x65, 0x5f, 0x76, 0x30, 0x30, 0x32, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x53, 0x53, 0x45, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x56, 0x30, 0x30, 0x32, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, + 0x52, 0x08, 0x64, 0x73, 0x73, 0x65, 0x56, 0x30, 0x30, 0x32, 0x42, 0x06, 0x0a, 0x04, 0x73, 0x70, + 0x65, 0x63, 0x22, 0xeb, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x72, 0x0a, 0x1a, 0x68, 0x61, 0x73, + 0x68, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x76, 0x30, 0x30, 0x32, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, + 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x65, 0x64, 0x52, 0x65, 0x6b, 0x6f, + 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x48, 0x00, 0x52, 0x17, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x52, 0x65, 0x6b, 0x6f, + 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32, 0x12, 0x59, 0x0a, + 0x11, 0x64, 0x73, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x30, + 0x30, 0x32, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, + 0x2e, 0x44, 0x53, 0x53, 0x45, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x0f, 0x64, 0x73, 0x73, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32, 0x42, 0x06, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, + 0x42, 0x7e, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x42, + 0x0c, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x50, 0x01, 0x5a, + 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c, 0x65, 0x73, + 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0xea, 0x02, 0x13, 0x53, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x32, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_rekor_v2_entry_proto_rawDescOnce sync.Once + file_rekor_v2_entry_proto_rawDescData []byte +) + +func file_rekor_v2_entry_proto_rawDescGZIP() []byte { + file_rekor_v2_entry_proto_rawDescOnce.Do(func() { + file_rekor_v2_entry_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_entry_proto_rawDesc), len(file_rekor_v2_entry_proto_rawDesc))) + }) + return file_rekor_v2_entry_proto_rawDescData +} + +var file_rekor_v2_entry_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_rekor_v2_entry_proto_goTypes = []any{ + (*Entry)(nil), // 0: dev.sigstore.rekor.v2.Entry + 
(*Spec)(nil), // 1: dev.sigstore.rekor.v2.Spec + (*CreateEntryRequest)(nil), // 2: dev.sigstore.rekor.v2.CreateEntryRequest + (*HashedRekordLogEntryV002)(nil), // 3: dev.sigstore.rekor.v2.HashedRekordLogEntryV002 + (*DSSELogEntryV002)(nil), // 4: dev.sigstore.rekor.v2.DSSELogEntryV002 + (*HashedRekordRequestV002)(nil), // 5: dev.sigstore.rekor.v2.HashedRekordRequestV002 + (*DSSERequestV002)(nil), // 6: dev.sigstore.rekor.v2.DSSERequestV002 +} +var file_rekor_v2_entry_proto_depIdxs = []int32{ + 1, // 0: dev.sigstore.rekor.v2.Entry.spec:type_name -> dev.sigstore.rekor.v2.Spec + 3, // 1: dev.sigstore.rekor.v2.Spec.hashed_rekord_v002:type_name -> dev.sigstore.rekor.v2.HashedRekordLogEntryV002 + 4, // 2: dev.sigstore.rekor.v2.Spec.dsse_v002:type_name -> dev.sigstore.rekor.v2.DSSELogEntryV002 + 5, // 3: dev.sigstore.rekor.v2.CreateEntryRequest.hashed_rekord_request_v002:type_name -> dev.sigstore.rekor.v2.HashedRekordRequestV002 + 6, // 4: dev.sigstore.rekor.v2.CreateEntryRequest.dsse_request_v002:type_name -> dev.sigstore.rekor.v2.DSSERequestV002 + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_rekor_v2_entry_proto_init() } +func file_rekor_v2_entry_proto_init() { + if File_rekor_v2_entry_proto != nil { + return + } + file_rekor_v2_dsse_proto_init() + file_rekor_v2_hashedrekord_proto_init() + file_rekor_v2_entry_proto_msgTypes[1].OneofWrappers = []any{ + (*Spec_HashedRekordV002)(nil), + (*Spec_DsseV002)(nil), + } + file_rekor_v2_entry_proto_msgTypes[2].OneofWrappers = []any{ + (*CreateEntryRequest_HashedRekordRequestV002)(nil), + (*CreateEntryRequest_DsseRequestV002)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_entry_proto_rawDesc), len(file_rekor_v2_entry_proto_rawDesc)), + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_rekor_v2_entry_proto_goTypes, + DependencyIndexes: file_rekor_v2_entry_proto_depIdxs, + MessageInfos: file_rekor_v2_entry_proto_msgTypes, + }.Build() + File_rekor_v2_entry_proto = out.File + file_rekor_v2_entry_proto_goTypes = nil + file_rekor_v2_entry_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/hashedrekord.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/hashedrekord.pb.go new file mode 100644 index 00000000000..21d0612dafd --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/hashedrekord.pb.go @@ -0,0 +1,243 @@ +// Copyright 2025 The Sigstore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: rekor/v2/hashedrekord.proto + +package protobuf + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A request to add a hashedrekord v0.0.2 to the log +type HashedRekordRequestV002 struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The hashed data + Digest []byte `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` + // A single signature over the hashed data with the verifier needed to validate it + Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HashedRekordRequestV002) Reset() { + *x = HashedRekordRequestV002{} + mi := &file_rekor_v2_hashedrekord_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HashedRekordRequestV002) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashedRekordRequestV002) ProtoMessage() {} + +func (x *HashedRekordRequestV002) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_hashedrekord_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashedRekordRequestV002.ProtoReflect.Descriptor instead. +func (*HashedRekordRequestV002) Descriptor() ([]byte, []int) { + return file_rekor_v2_hashedrekord_proto_rawDescGZIP(), []int{0} +} + +func (x *HashedRekordRequestV002) GetDigest() []byte { + if x != nil { + return x.Digest + } + return nil +} + +func (x *HashedRekordRequestV002) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} + +type HashedRekordLogEntryV002 struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The hashed data + Data *v1.HashOutput `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // A single signature over the hashed data with the verifier needed to validate it + Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HashedRekordLogEntryV002) Reset() { + *x = HashedRekordLogEntryV002{} + mi := &file_rekor_v2_hashedrekord_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HashedRekordLogEntryV002) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashedRekordLogEntryV002) ProtoMessage() {} + +func (x *HashedRekordLogEntryV002) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_hashedrekord_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashedRekordLogEntryV002.ProtoReflect.Descriptor instead. 
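+//
+// Editor's sketch (hypothetical values; "digest" is a [32]byte SHA-256 sum and
+// "sig" a previously built *Signature) of a v0.0.2 hashedrekord request:
+//
+//	req := &HashedRekordRequestV002{
+//		Digest:    digest[:],
+//		Signature: sig,
+//	}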
+func (*HashedRekordLogEntryV002) Descriptor() ([]byte, []int) { + return file_rekor_v2_hashedrekord_proto_rawDescGZIP(), []int{1} +} + +func (x *HashedRekordLogEntryV002) GetData() *v1.HashOutput { + if x != nil { + return x.Data + } + return nil +} + +func (x *HashedRekordLogEntryV002) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} + +var File_rekor_v2_hashedrekord_proto protoreflect.FileDescriptor + +var file_rekor_v2_hashedrekord_proto_rawDesc = string([]byte{ + 0x0a, 0x1b, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x65, + 0x64, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, + 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x72, 0x65, + 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7b, 0x0a, 0x17, 0x48, 0x61, 0x73, 0x68, 0x65, 0x64, 0x52, + 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32, + 0x12, 0x1b, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, + 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x22, 0x9c, 0x01, 0x0a, 0x18, 0x48, 0x61, 0x73, 0x68, 0x65, 0x64, 0x52, 0x65, 0x6b, + 0x6f, 0x72, 0x64, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x56, 0x30, 0x30, 0x32, 0x12, + 0x3b, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x43, 0x0a, 0x09, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, + 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x42, 0x85, 0x01, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, + 0x32, 0x42, 0x13, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x48, 0x61, 0x73, 0x68, 0x65, 0x64, + 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, + 0x6b, 
0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0xea, 0x02, 0x13, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, + 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +}) + +var ( + file_rekor_v2_hashedrekord_proto_rawDescOnce sync.Once + file_rekor_v2_hashedrekord_proto_rawDescData []byte +) + +func file_rekor_v2_hashedrekord_proto_rawDescGZIP() []byte { + file_rekor_v2_hashedrekord_proto_rawDescOnce.Do(func() { + file_rekor_v2_hashedrekord_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_hashedrekord_proto_rawDesc), len(file_rekor_v2_hashedrekord_proto_rawDesc))) + }) + return file_rekor_v2_hashedrekord_proto_rawDescData +} + +var file_rekor_v2_hashedrekord_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_rekor_v2_hashedrekord_proto_goTypes = []any{ + (*HashedRekordRequestV002)(nil), // 0: dev.sigstore.rekor.v2.HashedRekordRequestV002 + (*HashedRekordLogEntryV002)(nil), // 1: dev.sigstore.rekor.v2.HashedRekordLogEntryV002 + (*Signature)(nil), // 2: dev.sigstore.rekor.v2.Signature + (*v1.HashOutput)(nil), // 3: dev.sigstore.common.v1.HashOutput +} +var file_rekor_v2_hashedrekord_proto_depIdxs = []int32{ + 2, // 0: dev.sigstore.rekor.v2.HashedRekordRequestV002.signature:type_name -> dev.sigstore.rekor.v2.Signature + 3, // 1: dev.sigstore.rekor.v2.HashedRekordLogEntryV002.data:type_name -> dev.sigstore.common.v1.HashOutput + 2, // 2: dev.sigstore.rekor.v2.HashedRekordLogEntryV002.signature:type_name -> dev.sigstore.rekor.v2.Signature + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_rekor_v2_hashedrekord_proto_init() } +func file_rekor_v2_hashedrekord_proto_init() { + if File_rekor_v2_hashedrekord_proto != nil { + return + } + file_rekor_v2_verifier_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_hashedrekord_proto_rawDesc), len(file_rekor_v2_hashedrekord_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_rekor_v2_hashedrekord_proto_goTypes, + DependencyIndexes: file_rekor_v2_hashedrekord_proto_depIdxs, + MessageInfos: file_rekor_v2_hashedrekord_proto_msgTypes, + }.Build() + File_rekor_v2_hashedrekord_proto = out.File + file_rekor_v2_hashedrekord_proto_goTypes = nil + file_rekor_v2_hashedrekord_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.go new file mode 100644 index 00000000000..f261cdc3596 --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.go @@ -0,0 +1,287 @@ +// Copyright 2025 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.36.5
+// 	protoc        v6.30.2
+// source: rekor/v2/rekor_service.proto
+
+package protobuf
+
+import (
+	_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
+	v1 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	httpbody "google.golang.org/genproto/googleapis/api/httpbody"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+	reflect "reflect"
+	sync "sync"
+	unsafe "unsafe"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Request for a full or partial tile (see https://github.com/C2SP/C2SP/blob/main/tlog-tiles.md#merkle-tree)
+type TileRequest struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	L     uint32                 `protobuf:"varint,1,opt,name=L,proto3" json:"L,omitempty"`
+	// N must be an index encoded as zero-padded 3-digit path elements, e.g. "x123/x456/789",
+	// and may end with ".p/<W>", where "<W>" is a uint8
+	N             string `protobuf:"bytes,2,opt,name=N,proto3" json:"N,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *TileRequest) Reset() {
+	*x = TileRequest{}
+	mi := &file_rekor_v2_rekor_service_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *TileRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TileRequest) ProtoMessage() {}
+
+func (x *TileRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_rekor_v2_rekor_service_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TileRequest.ProtoReflect.Descriptor instead.
+func (*TileRequest) Descriptor() ([]byte, []int) {
+	return file_rekor_v2_rekor_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TileRequest) GetL() uint32 {
+	if x != nil {
+		return x.L
+	}
+	return 0
+}
+
+func (x *TileRequest) GetN() string {
+	if x != nil {
+		return x.N
+	}
+	return ""
+}
+
+// Request for a full or partial entry bundle (see https://github.com/C2SP/C2SP/blob/main/tlog-tiles.md#log-entries)
+type EntryBundleRequest struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// N must be an index encoded as zero-padded 3-digit path elements, e.g. 
"x123/x456/789", + // and may end with ".p/", where "" is a uint8 + N string `protobuf:"bytes,1,opt,name=N,proto3" json:"N,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntryBundleRequest) Reset() { + *x = EntryBundleRequest{} + mi := &file_rekor_v2_rekor_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntryBundleRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntryBundleRequest) ProtoMessage() {} + +func (x *EntryBundleRequest) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_rekor_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntryBundleRequest.ProtoReflect.Descriptor instead. +func (*EntryBundleRequest) Descriptor() ([]byte, []int) { + return file_rekor_v2_rekor_service_proto_rawDescGZIP(), []int{1} +} + +func (x *EntryBundleRequest) GetN() string { + if x != nil { + return x.N + } + return "" +} + +var File_rekor_v2_rekor_service_proto protoreflect.FileDescriptor + +var file_rekor_v2_rekor_service_proto_rawDesc = string([]byte{ + 0x0a, 0x1c, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, + 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, + 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x14, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x29, 0x0a, 0x0b, 0x54, 0x69, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0c, 0x0a, 0x01, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x01, 0x4c, 0x12, 0x0c, 0x0a, 0x01, 0x4e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x01, 0x4e, 0x22, 0x22, 0x0a, 0x12, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0c, 0x0a, 0x01, 0x4e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x01, 0x4e, 0x32, 0xc8, 0x03, 0x0a, 0x05, 0x52, 0x65, 0x6b, 0x6f, 0x72, + 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x29, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 
0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x64, 0x65, + 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, + 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, + 0x3a, 0x01, 0x2a, 0x22, 0x13, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x6c, 0x6f, 0x67, + 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x54, + 0x69, 0x6c, 0x65, 0x12, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x1f, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x74, + 0x69, 0x6c, 0x65, 0x2f, 0x7b, 0x4c, 0x7d, 0x2f, 0x7b, 0x4e, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x76, + 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, + 0x12, 0x29, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x75, + 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, + 0x79, 0x22, 0x23, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x12, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x76, 0x32, 0x2f, 0x74, 0x69, 0x6c, 0x65, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x2f, + 0x7b, 0x4e, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x59, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, + 0x70, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x42, 0xc0, 0x03, 0x92, 0x41, 0xbc, 0x02, 0x12, 0xbc, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x6b, + 0x6f, 0x72, 0x20, 0x76, 0x32, 0x22, 0x5a, 0x0a, 0x10, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x20, 0x76, + 0x32, 0x20, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x68, 0x74, 0x74, 0x70, 0x73, + 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c, + 0x65, 0x73, 0x1a, 0x1d, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2d, 0x64, 0x65, 0x76, + 0x40, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2a, 0x4f, 0x0a, 0x12, 0x41, 0x70, 0x61, 0x63, 0x68, 0x65, 0x20, 0x4c, 0x69, 0x63, 0x65, + 0x6e, 0x73, 0x65, 0x20, 0x32, 0x2e, 0x30, 0x12, 0x39, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c, 0x65, 0x73, + 0x2f, 0x62, 0x6c, 
0x6f, 0x62, 0x2f, 0x6d, 0x61, 0x69, 0x6e, 0x2f, 0x4c, 0x49, 0x43, 0x45, 0x4e, + 0x53, 0x45, 0x32, 0x03, 0x32, 0x2e, 0x30, 0x1a, 0x14, 0x2a, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, + 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x2a, 0x01, 0x01, + 0x32, 0x10, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6a, 0x73, + 0x6f, 0x6e, 0x3a, 0x10, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, + 0x6a, 0x73, 0x6f, 0x6e, 0x72, 0x3e, 0x0a, 0x13, 0x4d, 0x6f, 0x72, 0x65, 0x20, 0x61, 0x62, 0x6f, + 0x75, 0x74, 0x20, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x20, 0x76, 0x32, 0x12, 0x27, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, + 0x69, 0x6c, 0x65, 0x73, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, + 0x32, 0x42, 0x0e, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, + 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0xea, 0x02, + 0x13, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, 0x72, + 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_rekor_v2_rekor_service_proto_rawDescOnce sync.Once + file_rekor_v2_rekor_service_proto_rawDescData []byte +) + +func file_rekor_v2_rekor_service_proto_rawDescGZIP() []byte { + file_rekor_v2_rekor_service_proto_rawDescOnce.Do(func() { + file_rekor_v2_rekor_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_rekor_service_proto_rawDesc), len(file_rekor_v2_rekor_service_proto_rawDesc))) + }) + return file_rekor_v2_rekor_service_proto_rawDescData +} + +var file_rekor_v2_rekor_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_rekor_v2_rekor_service_proto_goTypes = []any{ + (*TileRequest)(nil), // 0: dev.sigstore.rekor.v2.TileRequest + (*EntryBundleRequest)(nil), // 1: dev.sigstore.rekor.v2.EntryBundleRequest + (*CreateEntryRequest)(nil), // 2: dev.sigstore.rekor.v2.CreateEntryRequest + (*emptypb.Empty)(nil), // 3: google.protobuf.Empty + (*v1.TransparencyLogEntry)(nil), // 4: dev.sigstore.rekor.v1.TransparencyLogEntry + (*httpbody.HttpBody)(nil), // 5: google.api.HttpBody +} +var file_rekor_v2_rekor_service_proto_depIdxs = []int32{ + 2, // 0: dev.sigstore.rekor.v2.Rekor.CreateEntry:input_type -> dev.sigstore.rekor.v2.CreateEntryRequest + 0, // 1: dev.sigstore.rekor.v2.Rekor.GetTile:input_type -> dev.sigstore.rekor.v2.TileRequest + 1, // 2: dev.sigstore.rekor.v2.Rekor.GetEntryBundle:input_type -> dev.sigstore.rekor.v2.EntryBundleRequest + 3, // 3: dev.sigstore.rekor.v2.Rekor.GetCheckpoint:input_type -> google.protobuf.Empty + 4, // 4: dev.sigstore.rekor.v2.Rekor.CreateEntry:output_type -> dev.sigstore.rekor.v1.TransparencyLogEntry + 5, // 5: dev.sigstore.rekor.v2.Rekor.GetTile:output_type -> google.api.HttpBody + 5, // 6: dev.sigstore.rekor.v2.Rekor.GetEntryBundle:output_type -> google.api.HttpBody + 5, // 7: 
dev.sigstore.rekor.v2.Rekor.GetCheckpoint:output_type -> google.api.HttpBody + 4, // [4:8] is the sub-list for method output_type + 0, // [0:4] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_rekor_v2_rekor_service_proto_init() } +func file_rekor_v2_rekor_service_proto_init() { + if File_rekor_v2_rekor_service_proto != nil { + return + } + file_rekor_v2_entry_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_rekor_service_proto_rawDesc), len(file_rekor_v2_rekor_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_rekor_v2_rekor_service_proto_goTypes, + DependencyIndexes: file_rekor_v2_rekor_service_proto_depIdxs, + MessageInfos: file_rekor_v2_rekor_service_proto_msgTypes, + }.Build() + File_rekor_v2_rekor_service_proto = out.File + file_rekor_v2_rekor_service_proto_goTypes = nil + file_rekor_v2_rekor_service_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.gw.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.gw.go new file mode 100644 index 00000000000..e0d4857bee4 --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.gw.go @@ -0,0 +1,381 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: rekor/v2/rekor_service.proto + +/* +Package protobuf is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
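+
+The route table below restates the registrations found later in this file;
+the concrete example path is an editor's sketch (assuming the tlog-tiles
+encoding referenced in rekor_service.pb.go), not upstream prose:
+
+	POST /api/v2/log/entries         -> Rekor/CreateEntry
+	GET  /api/v2/tile/{L}/{N=**}     -> Rekor/GetTile
+	GET  /api/v2/tile/entries/{N=**} -> Rekor/GetEntryBundle
+	GET  /api/v2/checkpoint          -> Rekor/GetCheckpoint
+
+For example, a width-8 partial tile at level 0 for index 1234067 would be
+fetched from "/api/v2/tile/0/x001/x234/067.p/8".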
+*/ +package protobuf + +import ( + "context" + "errors" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/emptypb" +) + +// Suppress "imported and not used" errors +var ( + _ codes.Code + _ io.Reader + _ status.Status + _ = errors.New + _ = runtime.String + _ = utilities.NewDoubleArray + _ = metadata.Join +) + +func request_Rekor_CreateEntry_0(ctx context.Context, marshaler runtime.Marshaler, client RekorClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq CreateEntryRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := client.CreateEntry(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_Rekor_CreateEntry_0(ctx context.Context, marshaler runtime.Marshaler, server RekorServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq CreateEntryRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.CreateEntry(ctx, &protoReq) + return msg, metadata, err +} + +func request_Rekor_GetTile_0(ctx context.Context, marshaler runtime.Marshaler, client RekorClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq TileRequest + metadata runtime.ServerMetadata + err error + ) + io.Copy(io.Discard, req.Body) + val, ok := pathParams["L"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "L") + } + protoReq.L, err = runtime.Uint32(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "L", err) + } + val, ok = pathParams["N"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "N") + } + protoReq.N, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "N", err) + } + msg, err := client.GetTile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_Rekor_GetTile_0(ctx context.Context, marshaler runtime.Marshaler, server RekorServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq TileRequest + metadata runtime.ServerMetadata + err error + ) + val, ok := pathParams["L"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "L") + } + protoReq.L, err = runtime.Uint32(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "L", err) + } + val, ok = pathParams["N"] + if !ok { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "missing parameter %s", "N") + } + protoReq.N, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "N", err) + } + msg, err := server.GetTile(ctx, &protoReq) + return msg, metadata, err +} + +func request_Rekor_GetEntryBundle_0(ctx context.Context, marshaler runtime.Marshaler, client RekorClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq EntryBundleRequest + metadata runtime.ServerMetadata + err error + ) + io.Copy(io.Discard, req.Body) + val, ok := pathParams["N"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "N") + } + protoReq.N, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "N", err) + } + msg, err := client.GetEntryBundle(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_Rekor_GetEntryBundle_0(ctx context.Context, marshaler runtime.Marshaler, server RekorServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq EntryBundleRequest + metadata runtime.ServerMetadata + err error + ) + val, ok := pathParams["N"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "N") + } + protoReq.N, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "N", err) + } + msg, err := server.GetEntryBundle(ctx, &protoReq) + return msg, metadata, err +} + +func request_Rekor_GetCheckpoint_0(ctx context.Context, marshaler runtime.Marshaler, client RekorClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq emptypb.Empty + metadata runtime.ServerMetadata + ) + io.Copy(io.Discard, req.Body) + msg, err := client.GetCheckpoint(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_Rekor_GetCheckpoint_0(ctx context.Context, marshaler runtime.Marshaler, server RekorServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq emptypb.Empty + metadata runtime.ServerMetadata + ) + msg, err := server.GetCheckpoint(ctx, &protoReq) + return msg, metadata, err +} + +// RegisterRekorHandlerServer registers the http handlers for service Rekor to "mux". +// UnaryRPC :call RekorServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterRekorHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. 
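+//
+// A minimal usage sketch (editor's illustration; "srv" is a hypothetical
+// RekorServer implementation):
+//
+//	mux := runtime.NewServeMux()
+//	if err := RegisterRekorHandlerServer(ctx, mux, srv); err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = http.ListenAndServe(":8080", mux)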
+func RegisterRekorHandlerServer(ctx context.Context, mux *runtime.ServeMux, server RekorServer) error { + mux.Handle(http.MethodPost, pattern_Rekor_CreateEntry_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/CreateEntry", runtime.WithHTTPPathPattern("/api/v2/log/entries")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Rekor_CreateEntry_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_CreateEntry_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + mux.Handle(http.MethodGet, pattern_Rekor_GetTile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetTile", runtime.WithHTTPPathPattern("/api/v2/tile/{L}/{N=**}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Rekor_GetTile_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_GetTile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_Rekor_GetEntryBundle_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetEntryBundle", runtime.WithHTTPPathPattern("/api/v2/tile/entries/{N=**}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Rekor_GetEntryBundle_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_GetEntryBundle_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + mux.Handle(http.MethodGet, pattern_Rekor_GetCheckpoint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetCheckpoint", runtime.WithHTTPPathPattern("/api/v2/checkpoint")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Rekor_GetCheckpoint_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_GetCheckpoint_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + + return nil +} + +// RegisterRekorHandlerFromEndpoint is same as RegisterRekorHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterRekorHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.NewClient(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + return RegisterRekorHandler(ctx, mux, conn) +} + +// RegisterRekorHandler registers the http handlers for service Rekor to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
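A companion sketch of the endpoint-dialing variant defined just above: RegisterRekorHandlerFromEndpoint dials the gRPC server and ties the connection's lifetime to ctx. The addresses and the insecure transport credentials are placeholders for local experimentation only.

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	pb "github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // cancelling ctx also closes the dialed connection

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	// "localhost:9090" is a placeholder for the Rekor gRPC endpoint.
	if err := pb.RegisterRekorHandlerFromEndpoint(ctx, mux, "localhost:9090", opts); err != nil {
		log.Fatal(err)
	}
	// e.g. GET /api/v2/checkpoint is now proxied to Rekor.GetCheckpoint.
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```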
+func RegisterRekorHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterRekorHandlerClient(ctx, mux, NewRekorClient(conn)) +} + +// RegisterRekorHandlerClient registers the http handlers for service Rekor +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "RekorClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "RekorClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "RekorClient" to call the correct interceptors. This client ignores the HTTP middlewares. +func RegisterRekorHandlerClient(ctx context.Context, mux *runtime.ServeMux, client RekorClient) error { + mux.Handle(http.MethodPost, pattern_Rekor_CreateEntry_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/CreateEntry", runtime.WithHTTPPathPattern("/api/v2/log/entries")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Rekor_CreateEntry_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_CreateEntry_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + mux.Handle(http.MethodGet, pattern_Rekor_GetTile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetTile", runtime.WithHTTPPathPattern("/api/v2/tile/{L}/{N=**}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Rekor_GetTile_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_GetTile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_Rekor_GetEntryBundle_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetEntryBundle", runtime.WithHTTPPathPattern("/api/v2/tile/entries/{N=**}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Rekor_GetEntryBundle_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_GetEntryBundle_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + mux.Handle(http.MethodGet, pattern_Rekor_GetCheckpoint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetCheckpoint", runtime.WithHTTPPathPattern("/api/v2/checkpoint")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Rekor_GetCheckpoint_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_Rekor_GetCheckpoint_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + return nil +} + +var ( + pattern_Rekor_CreateEntry_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v2", "log", "entries"}, "")) + pattern_Rekor_GetTile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 3, 0, 4, 1, 5, 4}, []string{"api", "v2", "tile", "L", "N"}, "")) + pattern_Rekor_GetEntryBundle_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 3, 0, 4, 1, 5, 4}, []string{"api", "v2", "tile", "entries", "N"}, "")) + pattern_Rekor_GetCheckpoint_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v2", "checkpoint"}, "")) +) + +var ( + forward_Rekor_CreateEntry_0 = runtime.ForwardResponseMessage + forward_Rekor_GetTile_0 = runtime.ForwardResponseMessage + forward_Rekor_GetEntryBundle_0 = runtime.ForwardResponseMessage + forward_Rekor_GetCheckpoint_0 = runtime.ForwardResponseMessage +) diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service_grpc.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service_grpc.pb.go new file mode 100644 index 00000000000..807d630524a --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service_grpc.pb.go @@ -0,0 +1,266 @@ +// Copyright 2025 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v6.30.2 +// source: rekor/v2/rekor_service.proto + +package protobuf + +import ( + context "context" + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + httpbody "google.golang.org/genproto/googleapis/api/httpbody" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Rekor_CreateEntry_FullMethodName = "/dev.sigstore.rekor.v2.Rekor/CreateEntry" + Rekor_GetTile_FullMethodName = "/dev.sigstore.rekor.v2.Rekor/GetTile" + Rekor_GetEntryBundle_FullMethodName = "/dev.sigstore.rekor.v2.Rekor/GetEntryBundle" + Rekor_GetCheckpoint_FullMethodName = "/dev.sigstore.rekor.v2.Rekor/GetCheckpoint" +) + +// RekorClient is the client API for Rekor service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// A service for sigstore clients to connect to to create log entries +// and for log monitors and witnesses to audit/inspect the log +type RekorClient interface { + // Create an entry in the log + CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*v1.TransparencyLogEntry, error) + // Get a tile from the log + GetTile(ctx context.Context, in *TileRequest, opts ...grpc.CallOption) (*httpbody.HttpBody, error) + // Get an entry bundle from the log + GetEntryBundle(ctx context.Context, in *EntryBundleRequest, opts ...grpc.CallOption) (*httpbody.HttpBody, error) + // Get a checkpoint from the log + GetCheckpoint(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*httpbody.HttpBody, error) +} + +type rekorClient struct { + cc grpc.ClientConnInterface +} + +func NewRekorClient(cc grpc.ClientConnInterface) RekorClient { + return &rekorClient{cc} +} + +func (c *rekorClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*v1.TransparencyLogEntry, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(v1.TransparencyLogEntry) + err := c.cc.Invoke(ctx, Rekor_CreateEntry_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *rekorClient) GetTile(ctx context.Context, in *TileRequest, opts ...grpc.CallOption) (*httpbody.HttpBody, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(httpbody.HttpBody) + err := c.cc.Invoke(ctx, Rekor_GetTile_FullMethodName, in, out, cOpts...) 
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *rekorClient) GetEntryBundle(ctx context.Context, in *EntryBundleRequest, opts ...grpc.CallOption) (*httpbody.HttpBody, error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	out := new(httpbody.HttpBody)
+	err := c.cc.Invoke(ctx, Rekor_GetEntryBundle_FullMethodName, in, out, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *rekorClient) GetCheckpoint(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*httpbody.HttpBody, error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	out := new(httpbody.HttpBody)
+	err := c.cc.Invoke(ctx, Rekor_GetCheckpoint_FullMethodName, in, out, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// RekorServer is the server API for Rekor service.
+// All implementations must embed UnimplementedRekorServer
+// for forward compatibility.
+//
+// A service for sigstore clients to connect to to create log entries
+// and for log monitors and witnesses to audit/inspect the log
+type RekorServer interface {
+	// Create an entry in the log
+	CreateEntry(context.Context, *CreateEntryRequest) (*v1.TransparencyLogEntry, error)
+	// Get a tile from the log
+	GetTile(context.Context, *TileRequest) (*httpbody.HttpBody, error)
+	// Get an entry bundle from the log
+	GetEntryBundle(context.Context, *EntryBundleRequest) (*httpbody.HttpBody, error)
+	// Get a checkpoint from the log
+	GetCheckpoint(context.Context, *emptypb.Empty) (*httpbody.HttpBody, error)
+	mustEmbedUnimplementedRekorServer()
+}
+
+// UnimplementedRekorServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedRekorServer struct{}
+
+func (UnimplementedRekorServer) CreateEntry(context.Context, *CreateEntryRequest) (*v1.TransparencyLogEntry, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CreateEntry not implemented")
+}
+func (UnimplementedRekorServer) GetTile(context.Context, *TileRequest) (*httpbody.HttpBody, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetTile not implemented")
+}
+func (UnimplementedRekorServer) GetEntryBundle(context.Context, *EntryBundleRequest) (*httpbody.HttpBody, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetEntryBundle not implemented")
+}
+func (UnimplementedRekorServer) GetCheckpoint(context.Context, *emptypb.Empty) (*httpbody.HttpBody, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetCheckpoint not implemented")
+}
+func (UnimplementedRekorServer) mustEmbedUnimplementedRekorServer() {}
+func (UnimplementedRekorServer) testEmbeddedByValue()               {}
+
+// UnsafeRekorServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to RekorServer will
+// result in compilation errors.
+type UnsafeRekorServer interface {
+	mustEmbedUnimplementedRekorServer()
+}
+
+func RegisterRekorServer(s grpc.ServiceRegistrar, srv RekorServer) {
+	// If the following call panics, it indicates UnimplementedRekorServer was
+	// embedded by pointer and is nil. This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&Rekor_ServiceDesc, srv) +} + +func _Rekor_CreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RekorServer).CreateEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Rekor_CreateEntry_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RekorServer).CreateEntry(ctx, req.(*CreateEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Rekor_GetTile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RekorServer).GetTile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Rekor_GetTile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RekorServer).GetTile(ctx, req.(*TileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Rekor_GetEntryBundle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EntryBundleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RekorServer).GetEntryBundle(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Rekor_GetEntryBundle_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RekorServer).GetEntryBundle(ctx, req.(*EntryBundleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Rekor_GetCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RekorServer).GetCheckpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Rekor_GetCheckpoint_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RekorServer).GetCheckpoint(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// Rekor_ServiceDesc is the grpc.ServiceDesc for Rekor service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Rekor_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "dev.sigstore.rekor.v2.Rekor", + HandlerType: (*RekorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateEntry", + Handler: _Rekor_CreateEntry_Handler, + }, + { + MethodName: "GetTile", + Handler: _Rekor_GetTile_Handler, + }, + { + MethodName: "GetEntryBundle", + Handler: _Rekor_GetEntryBundle_Handler, + }, + { + MethodName: "GetCheckpoint", + Handler: _Rekor_GetCheckpoint_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rekor/v2/rekor_service.proto", +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/verifier.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/verifier.pb.go new file mode 100644 index 00000000000..8b9b4f1f81e --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/verifier.pb.go @@ -0,0 +1,338 @@ +// Copyright 2025 The Sigstore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: rekor/v2/verifier.proto + +package protobuf + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// PublicKey contains an encoded public key +type PublicKey struct { + state protoimpl.MessageState `protogen:"open.v1"` + // DER-encoded public key + RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3" json:"raw_bytes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PublicKey) Reset() { + *x = PublicKey{} + mi := &file_rekor_v2_verifier_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublicKey) ProtoMessage() {} + +func (x *PublicKey) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_verifier_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead. 
+func (*PublicKey) Descriptor() ([]byte, []int) {
+	return file_rekor_v2_verifier_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *PublicKey) GetRawBytes() []byte {
+	if x != nil {
+		return x.RawBytes
+	}
+	return nil
+}
+
+// Either a public key or an X.509 certificate with an embedded public key
+type Verifier struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// Types that are valid to be assigned to Verifier:
+	//
+	//	*Verifier_PublicKey
+	//	*Verifier_X509Certificate
+	Verifier isVerifier_Verifier `protobuf_oneof:"verifier"`
+	// Key encoding and signature algorithm to use for this key
+	KeyDetails v1.PublicKeyDetails `protobuf:"varint,3,opt,name=key_details,json=keyDetails,proto3,enum=dev.sigstore.common.v1.PublicKeyDetails" json:"key_details,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Verifier) Reset() {
+	*x = Verifier{}
+	mi := &file_rekor_v2_verifier_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Verifier) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Verifier) ProtoMessage() {}
+
+func (x *Verifier) ProtoReflect() protoreflect.Message {
+	mi := &file_rekor_v2_verifier_proto_msgTypes[1]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Verifier.ProtoReflect.Descriptor instead.
+func (*Verifier) Descriptor() ([]byte, []int) {
+	return file_rekor_v2_verifier_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Verifier) GetVerifier() isVerifier_Verifier {
+	if x != nil {
+		return x.Verifier
+	}
+	return nil
+}
+
+func (x *Verifier) GetPublicKey() *PublicKey {
+	if x != nil {
+		if x, ok := x.Verifier.(*Verifier_PublicKey); ok {
+			return x.PublicKey
+		}
+	}
+	return nil
+}
+
+func (x *Verifier) GetX509Certificate() *v1.X509Certificate {
+	if x != nil {
+		if x, ok := x.Verifier.(*Verifier_X509Certificate); ok {
+			return x.X509Certificate
+		}
+	}
+	return nil
+}
+
+func (x *Verifier) GetKeyDetails() v1.PublicKeyDetails {
+	if x != nil {
+		return x.KeyDetails
+	}
+	return v1.PublicKeyDetails(0)
+}
+
+type isVerifier_Verifier interface {
+	isVerifier_Verifier()
+}
+
+type Verifier_PublicKey struct {
+	// DER-encoded public key.
Encoding method is specified by the key_details attribute + PublicKey *PublicKey `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3,oneof"` +} + +type Verifier_X509Certificate struct { + // DER-encoded certificate + X509Certificate *v1.X509Certificate `protobuf:"bytes,2,opt,name=x509_certificate,json=x509Certificate,proto3,oneof"` +} + +func (*Verifier_PublicKey) isVerifier_Verifier() {} + +func (*Verifier_X509Certificate) isVerifier_Verifier() {} + +// A signature and an associated verifier +type Signature struct { + state protoimpl.MessageState `protogen:"open.v1"` + Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + Verifier *Verifier `protobuf:"bytes,2,opt,name=verifier,proto3" json:"verifier,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Signature) Reset() { + *x = Signature{} + mi := &file_rekor_v2_verifier_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Signature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Signature) ProtoMessage() {} + +func (x *Signature) ProtoReflect() protoreflect.Message { + mi := &file_rekor_v2_verifier_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Signature.ProtoReflect.Descriptor instead. +func (*Signature) Descriptor() ([]byte, []int) { + return file_rekor_v2_verifier_proto_rawDescGZIP(), []int{2} +} + +func (x *Signature) GetContent() []byte { + if x != nil { + return x.Content + } + return nil +} + +func (x *Signature) GetVerifier() *Verifier { + if x != nil { + return x.Verifier + } + return nil +} + +var File_rekor_v2_verifier_proto protoreflect.FileDescriptor + +var file_rekor_v2_verifier_proto_rawDesc = string([]byte{ + 0x0a, 0x17, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, + 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2d, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, + 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x08, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, + 0x00, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x59, 0x0a, 0x10, + 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x6b, 0x65, 0x79, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x22, 0x6c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x12, 0x1d, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, + 0x40, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x42, 0x81, 0x01, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, + 0x32, 0x42, 0x0f, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, + 0x74, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0xea, + 0x02, 0x13, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, + 0x72, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_rekor_v2_verifier_proto_rawDescOnce sync.Once + file_rekor_v2_verifier_proto_rawDescData []byte +) + +func file_rekor_v2_verifier_proto_rawDescGZIP() []byte { + file_rekor_v2_verifier_proto_rawDescOnce.Do(func() { + file_rekor_v2_verifier_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_verifier_proto_rawDesc), len(file_rekor_v2_verifier_proto_rawDesc))) + }) + return file_rekor_v2_verifier_proto_rawDescData +} + +var file_rekor_v2_verifier_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_rekor_v2_verifier_proto_goTypes = []any{ + (*PublicKey)(nil), // 0: dev.sigstore.rekor.v2.PublicKey + (*Verifier)(nil), // 1: dev.sigstore.rekor.v2.Verifier + (*Signature)(nil), // 2: dev.sigstore.rekor.v2.Signature + (*v1.X509Certificate)(nil), // 3: dev.sigstore.common.v1.X509Certificate + (v1.PublicKeyDetails)(0), // 4: dev.sigstore.common.v1.PublicKeyDetails +} +var file_rekor_v2_verifier_proto_depIdxs = []int32{ + 0, // 0: dev.sigstore.rekor.v2.Verifier.public_key:type_name -> 
dev.sigstore.rekor.v2.PublicKey + 3, // 1: dev.sigstore.rekor.v2.Verifier.x509_certificate:type_name -> dev.sigstore.common.v1.X509Certificate + 4, // 2: dev.sigstore.rekor.v2.Verifier.key_details:type_name -> dev.sigstore.common.v1.PublicKeyDetails + 1, // 3: dev.sigstore.rekor.v2.Signature.verifier:type_name -> dev.sigstore.rekor.v2.Verifier + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_rekor_v2_verifier_proto_init() } +func file_rekor_v2_verifier_proto_init() { + if File_rekor_v2_verifier_proto != nil { + return + } + file_rekor_v2_verifier_proto_msgTypes[1].OneofWrappers = []any{ + (*Verifier_PublicKey)(nil), + (*Verifier_X509Certificate)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_verifier_proto_rawDesc), len(file_rekor_v2_verifier_proto_rawDesc)), + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_rekor_v2_verifier_proto_goTypes, + DependencyIndexes: file_rekor_v2_verifier_proto_depIdxs, + MessageInfos: file_rekor_v2_verifier_proto_msgTypes, + }.Build() + File_rekor_v2_verifier_proto = out.File + file_rekor_v2_verifier_proto_goTypes = nil + file_rekor_v2_verifier_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/note/note.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/note/note.go new file mode 100644 index 00000000000..03bd6b839cf --- /dev/null +++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/note/note.go @@ -0,0 +1,219 @@ +/* +Copyright 2025 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Heavily borrowed from https://gist.githubusercontent.com/AlCutter/c6c69076dc55652e2d278900ccc1a5e7/raw/aac2bafc17a8efa162bd99b4453070b724779307/ecdsa_note.go - thanks, Al + +package note + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/binary" + "fmt" + "strings" + "unicode" + "unicode/utf8" + + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" + "golang.org/x/mod/sumdb/note" +) + +const ( + algEd25519 = 1 + algUndef = 255 + rsaID = "PKIX-RSA-PKCS#1v1.5" +) + +// noteSigner uses an arbitrary sigstore signer to implement golang.org/x/mod/sumdb/note.Signer, +// which is used in Tessera to sign checkpoints in the signed notes format +// (https://github.com/C2SP/C2SP/blob/main/signed-note.md). +type noteSigner struct { + name string + hash uint32 + sign func(msg []byte) ([]byte, error) +} + +// Name returns the server name associated with the key. +func (n *noteSigner) Name() string { + return n.name +} + +// KeyHash returns the key hash. 
+func (n *noteSigner) KeyHash() uint32 {
+	return n.hash
+}
+
+// Sign returns a signature for the given message.
+func (n *noteSigner) Sign(msg []byte) ([]byte, error) {
+	return n.sign(msg)
+}
+
+type noteVerifier struct {
+	name   string
+	hash   uint32
+	verify func(msg, sig []byte) bool
+}
+
+// Name implements note.Verifier.
+func (n *noteVerifier) Name() string {
+	return n.name
+}
+
+// KeyHash implements note.Verifier.
+func (n *noteVerifier) KeyHash() uint32 {
+	return n.hash
+}
+
+// Verify implements note.Verifier.
+func (n *noteVerifier) Verify(msg, sig []byte) bool {
+	return n.verify(msg, sig)
+}
+
+// isValidName reports whether the name conforms to the spec for the origin string of the note text
+// as defined in https://github.com/C2SP/C2SP/blob/main/tlog-checkpoint.md#note-text.
+func isValidName(name string) bool {
+	return name != "" && utf8.ValidString(name) && strings.IndexFunc(name, unicode.IsSpace) < 0 && !strings.Contains(name, "+")
+}
+
+// genConformantKeyHash generates a truncated (4-byte) and non-truncated
+// identifier for typical (non-ECDSA) keys.
+func genConformantKeyHash(name string, sigType, key []byte) (uint32, []byte) {
+	hash := sha256.New()
+	hash.Write([]byte(name))
+	hash.Write([]byte("\n"))
+	hash.Write(sigType)
+	hash.Write(key)
+	sum := hash.Sum(nil)
+	return binary.BigEndian.Uint32(sum), sum
+}
+
+// ed25519KeyHash generates the 4-byte key ID for an Ed25519 public key.
+// Ed25519 keys are the only key type compatible with witnessing.
+func ed25519KeyHash(name string, key []byte) (uint32, []byte) {
+	return genConformantKeyHash(name, []byte{algEd25519}, key)
+}
+
+// ecdsaKeyHash generates the 4-byte key ID for an ECDSA public key.
+// ECDSA key IDs do not conform to the note standard used for other key types
+// (see https://github.com/C2SP/C2SP/blob/8991f70ddf8a11de3a68d5a081e7be27e59d87c8/signed-note.md#signature-types).
+func ecdsaKeyHash(key *ecdsa.PublicKey) (uint32, []byte, error) {
+	marshaled, err := x509.MarshalPKIXPublicKey(key)
+	if err != nil {
+		return 0, nil, fmt.Errorf("marshaling public key: %w", err)
+	}
+	hash := sha256.Sum256(marshaled)
+	return binary.BigEndian.Uint32(hash[:]), hash[:], nil
+}
+
+// rsaKeyHash generates the 4-byte key ID for an RSA public key.
+func rsaKeyHash(name string, key *rsa.PublicKey) (uint32, []byte, error) {
+	marshaled, err := x509.MarshalPKIXPublicKey(key)
+	if err != nil {
+		return 0, nil, fmt.Errorf("marshaling public key: %w", err)
+	}
+	rsaAlg := append([]byte{algUndef}, []byte(rsaID)...)
+	id, hash := genConformantKeyHash(name, rsaAlg, marshaled)
+	return id, hash, nil
+}
+
+// KeyHash generates a truncated (4-byte) and non-truncated identifier for a
+// public key/origin.
+func KeyHash(origin string, key crypto.PublicKey) (uint32, []byte, error) {
+	var keyID uint32
+	var logID []byte
+	var err error
+
+	switch pk := key.(type) {
+	case *ecdsa.PublicKey:
+		keyID, logID, err = ecdsaKeyHash(pk)
+		if err != nil {
+			return 0, nil, fmt.Errorf("getting ECDSA key hash: %w", err)
+		}
+	case ed25519.PublicKey:
+		keyID, logID = ed25519KeyHash(origin, pk)
+	case *rsa.PublicKey:
+		keyID, logID, err = rsaKeyHash(origin, pk)
+		if err != nil {
+			return 0, nil, fmt.Errorf("getting RSA key hash: %w", err)
+		}
+	default:
+		return 0, nil, fmt.Errorf("unsupported key type: %T", key)
+	}
+
+	return keyID, logID, nil
+}
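Taken together, KeyHash plus the noteSigner/noteVerifier wrappers let any sigstore signer participate in the signed-note format. A minimal round-trip sketch, assuming a throwaway ECDSA key and a hypothetical origin (nothing here is part of the vendored code):

```go
package main

import (
	"context"
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"log"

	tilenote "github.com/sigstore/rekor-tiles/v2/pkg/note"
	"github.com/sigstore/sigstore/pkg/signature"
	sumdb_note "golang.org/x/mod/sumdb/note"
)

func main() {
	// Throwaway ECDSA key; a real log would use its configured signing key.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	sv, err := signature.LoadECDSASignerVerifier(priv, crypto.SHA256)
	if err != nil {
		log.Fatal(err)
	}

	// Origin must satisfy isValidName: non-empty, no whitespace, no '+'.
	const origin = "example.com/rekor-tiles-demo" // hypothetical origin
	signer, err := tilenote.NewNoteSigner(context.Background(), origin, sv)
	if err != nil {
		log.Fatal(err)
	}
	verifier, err := tilenote.NewNoteVerifier(origin, sv)
	if err != nil {
		log.Fatal(err)
	}

	// A checkpoint-shaped note body: origin, tree size, root hash (fake).
	h := sha256.Sum256([]byte("placeholder leaf"))
	text := fmt.Sprintf("%s\n42\n%s\n", origin, base64.StdEncoding.EncodeToString(h[:]))

	signed, err := sumdb_note.Sign(&sumdb_note.Note{Text: text}, signer)
	if err != nil {
		log.Fatal(err)
	}
	opened, err := sumdb_note.Open(signed, sumdb_note.VerifierList(verifier))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("note verified; signed by %s\n", opened.Sigs[0].Name)
}
```

Signer and verifier derive the same (name, key hash) pair via KeyHash, which is how sumdb_note.Open matches the signature line back to the supplied verifier.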
+
+// NewNoteSigner converts a sigstore/sigstore/pkg/signature.Signer into a note.Signer.
+func NewNoteSigner(ctx context.Context, origin string, signer signature.Signer) (note.Signer, error) {
+	if !isValidName(origin) {
+		return nil, fmt.Errorf("invalid name %s", origin)
+	}
+
+	pubKey, err := signer.PublicKey()
+	if err != nil {
+		return nil, fmt.Errorf("getting public key: %w", err)
+	}
+
+	keyID, _, err := KeyHash(origin, pubKey)
+	if err != nil {
+		return nil, err
+	}
+
+	sign := func(msg []byte) ([]byte, error) {
+		return signer.SignMessage(bytes.NewReader(msg), options.WithContext(ctx))
+	}
+
+	return &noteSigner{
+		name: origin,
+		hash: keyID,
+		sign: sign,
+	}, nil
+}
+
+// NewNoteVerifier converts a sigstore/sigstore/pkg/signature.Verifier into a note.Verifier.
+func NewNoteVerifier(origin string, verifier signature.Verifier) (note.Verifier, error) {
+	if !isValidName(origin) {
+		return nil, fmt.Errorf("invalid name %s", origin)
+	}
+
+	pubKey, err := verifier.PublicKey()
+	if err != nil {
+		return nil, fmt.Errorf("getting public key: %w", err)
+	}
+
+	keyID, _, err := KeyHash(origin, pubKey)
+	if err != nil {
+		return nil, err
+	}
+
+	return &noteVerifier{
+		name: origin,
+		hash: keyID,
+		verify: func(msg, sig []byte) bool {
+			if err := verifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
+				return false
+			}
+			return true
+		},
+	}, nil
+}
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/types/verifier/verifier.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/types/verifier/verifier.go
new file mode 100644
index 00000000000..b080977229d
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/types/verifier/verifier.go
@@ -0,0 +1,41 @@
+// Copyright 2025 The Sigstore Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verifier
+
+import (
+	"fmt"
+
+	pb "github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf"
+)
+
+// Validate validates that there are no missing fields in a Verifier protobuf
+func Validate(v *pb.Verifier) error {
+	publicKey := v.GetPublicKey()
+	x509Cert := v.GetX509Certificate()
+	if publicKey == nil && x509Cert == nil {
+		return fmt.Errorf("missing signature public key or X.509 certificate")
+	}
+	if publicKey != nil {
+		if len(publicKey.GetRawBytes()) == 0 {
+			return fmt.Errorf("missing public key raw bytes")
+		}
+	}
+	if x509Cert != nil {
+		if len(x509Cert.GetRawBytes()) == 0 {
+			return fmt.Errorf("missing X.509 certificate raw bytes")
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/verify/verify.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/verify/verify.go
new file mode 100644
index 00000000000..bb4fedf591c
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/verify/verify.go
@@ -0,0 +1,89 @@
+//
+// Copyright 2025 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "fmt" + + pbs "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + "github.com/sigstore/rekor-tiles/v2/internal/safeint" + f_log "github.com/transparency-dev/formats/log" + "github.com/transparency-dev/merkle/proof" + "github.com/transparency-dev/merkle/rfc6962" + sumdb_note "golang.org/x/mod/sumdb/note" +) + +// VerifyInclusionProof verifies an entry's inclusion proof +func VerifyInclusionProof(entry *pbs.TransparencyLogEntry, cp *f_log.Checkpoint) error { //nolint: revive + leafHash := rfc6962.DefaultHasher.HashLeaf(entry.CanonicalizedBody) + index, err := safeint.NewSafeInt64(entry.LogIndex) + if err != nil { + return fmt.Errorf("invalid index: %w", err) + } + if err := proof.VerifyInclusion(rfc6962.DefaultHasher, index.U(), cp.Size, leafHash, entry.InclusionProof.Hashes, cp.Hash); err != nil { + return fmt.Errorf("verifying inclusion: %w", err) + } + return nil +} + +// VerifyCheckpoint verifies the signature on the entry's inclusion proof checkpoint +func VerifyCheckpoint(unverifiedCp string, verifier sumdb_note.Verifier) (*f_log.Checkpoint, error) { //nolint: revive + cp, _, _, err := f_log.ParseCheckpoint([]byte(unverifiedCp), verifier.Name(), verifier) + if err != nil { + return nil, fmt.Errorf("unverified checkpoint signature: %v", err) + } + return cp, nil +} + +// VerifyWitnessedCheckpoint verifies the signature on the entry's inclusion proof checkpoint in addition to witness cosignatures. +// This returns the underlying note which contains all verified signatures. +func VerifyWitnessedCheckpoint(unverifiedCp string, verifier sumdb_note.Verifier, otherVerifiers ...sumdb_note.Verifier) (*f_log.Checkpoint, *sumdb_note.Note, error) { //nolint: revive + cp, _, n, err := f_log.ParseCheckpoint([]byte(unverifiedCp), verifier.Name(), verifier, otherVerifiers...) + if err != nil { + return nil, nil, fmt.Errorf("unverified checkpoint signature: %v", err) + } + return cp, n, nil +} + +// VerifyLogEntry verifies the log entry. This includes verifying the signature on the entry's +// inclusion proof checkpoint and verifying the entry inclusion proof +func VerifyLogEntry(entry *pbs.TransparencyLogEntry, verifier sumdb_note.Verifier) error { //nolint: revive + cp, err := VerifyCheckpoint(entry.GetInclusionProof().GetCheckpoint().GetEnvelope(), verifier) + if err != nil { + return err + } + return VerifyInclusionProof(entry, cp) +} + +// VerifyConsistencyProof verifies the latest checkpoint signature and the consistency proof between a previous log size +// and root hash and the latest checkpoint's size and root hash. This may be used by a C2SP witness. 
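A sketch of how the verification helpers above compose for a client that has just received a TransparencyLogEntry. The verifyEntry wrapper is hypothetical, and origin/pub stand in for the log's published checkpoint identity:

```go
package verifydemo // hypothetical package, for illustration only

import (
	"crypto"

	pbs "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1"
	tilenote "github.com/sigstore/rekor-tiles/v2/pkg/note"
	"github.com/sigstore/rekor-tiles/v2/pkg/verify"
	"github.com/sigstore/sigstore/pkg/signature"
)

// verifyEntry is a hypothetical helper: entry would come from a CreateEntry
// response, and origin/pub from the log's published key metadata.
func verifyEntry(entry *pbs.TransparencyLogEntry, origin string, pub crypto.PublicKey) error {
	sigVerifier, err := signature.LoadVerifier(pub, crypto.SHA256)
	if err != nil {
		return err
	}
	noteVerifier, err := tilenote.NewNoteVerifier(origin, sigVerifier)
	if err != nil {
		return err
	}
	// VerifyLogEntry checks the checkpoint signature first, then the Merkle
	// inclusion proof of entry.CanonicalizedBody against the checkpoint root.
	return verify.VerifyLogEntry(entry, noteVerifier)
}
```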
+func VerifyConsistencyProof(consistencyProof [][]byte, oldSize uint64, oldRootHash []byte, newUnverifiedCp string, verifier sumdb_note.Verifier) error { //nolint: revive
+	newCp, err := VerifyCheckpoint(newUnverifiedCp, verifier)
+	if err != nil {
+		return err
+	}
+	return proof.VerifyConsistency(rfc6962.DefaultHasher, oldSize, newCp.Size, consistencyProof, oldRootHash, newCp.Hash)
+}
+
+// VerifyConsistencyProofWithCheckpoints verifies previous and latest checkpoint signatures and the consistency proof
+// between these checkpoints. This may be used by a monitor that persists checkpoints.
+func VerifyConsistencyProofWithCheckpoints(consistencyProof [][]byte, oldUnverifiedCp, newUnverifiedCp string, verifier sumdb_note.Verifier) error { //nolint: revive
+	oldCp, err := VerifyCheckpoint(oldUnverifiedCp, verifier)
+	if err != nil {
+		return err
+	}
+	return VerifyConsistencyProof(consistencyProof, oldCp.Size, oldCp.Hash, newUnverifiedCp, verifier)
+}
diff --git a/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md b/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md
new file mode 100644
index 00000000000..bdff02765c5
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md
@@ -0,0 +1,122 @@
+# Contributing
+
+When contributing to this repository, please first discuss the change you wish
+to make via an [issue](https://github.com/sigstore/rekor/issues).
+
+## Pull Request Process
+
+1. Create an [issue](https://github.com/sigstore/rekor/issues)
+   outlining the fix or feature.
+2. Fork the rekor repository to your own github account and clone it locally.
+3. Hack on your changes.
+4. Update the README.md with details of changes to any interface; this includes new environment
+   variables, exposed ports, useful file locations, CLI parameters and
+   new or changed configuration values.
+5. Correctly format your commit message; see [Commit Message Guidelines](#commit-message-guidelines)
+   below.
+6. Ensure that CI passes; if it fails, fix the failures.
+7. Every pull request requires a review from the [core rekor team](https://github.com/orgs/sigstore/teams/core-team)
+   before merging.
+8. If your pull request consists of more than one commit, please squash your
+   commits as described in [Squash Commits](#squash-commits).
+
+## Commit Message Guidelines
+
+We follow the commit formatting recommendations found in [Chris Beams' How to Write a Git Commit Message article](https://chris.beams.io/posts/git-commit/).
+
+Well-formed commit messages not only help reviewers understand the nature of
+the Pull Request, but also assist the release process, where commit messages
+are used to generate release notes.
+
+A good example of a commit message would be as follows:
+
+```
+Summarize changes in around 50 characters or less
+
+More detailed explanatory text, if necessary. Wrap it to about 72
+characters or so. In some contexts, the first line is treated as the
+subject of the commit and the rest of the text as the body. The
+blank line separating the summary from the body is critical (unless
+you omit the body entirely); various tools like `log`, `shortlog`
+and `rebase` can get confused if you run the two together.
+
+Explain the problem that this commit is solving. Focus on why you
+are making this change as opposed to how (the code explains that).
+Are there side effects or other unintuitive consequences of this
+change? Here's the place to explain them.
+
+Further paragraphs come after blank lines.
+
+ - Bullet points are okay, too
+
+ - Typically a hyphen or asterisk is used for the bullet, preceded
+   by a single space, with blank lines in between, but conventions
+   vary here
+
+If you use an issue tracker, put references to them at the bottom,
+like this:
+
+Resolves: #123
+See also: #456, #789
+```
+
+Note the `Resolves #123` tag; this references the issue raised and allows us to
+ensure issues are associated and closed when a pull request is merged.
+
+Please refer to [the GitHub help page on message types](https://help.github.com/articles/closing-issues-using-keywords/)
+for a complete list of issue references.
+
+## Squash Commits
+
+Should your pull request consist of more than one commit (perhaps due to
+a change being requested during the review cycle), please perform a git squash
+once a reviewer has approved your pull request.
+
+A squash can be performed as follows. Let's say you have the following commits:
+
+    initial commit
+    second commit
+    final commit
+
+Run the command below with the number set to the total commits you wish to
+squash (in our case 3 commits):
+
+    git rebase -i HEAD~3
+
+Your default text editor will then open and you will see the following:
+
+    pick eb36612 initial commit
+    pick 9ac8968 second commit
+    pick a760569 final commit
+
+    # Rebase eb1429f..a760569 onto eb1429f (3 commands)
+
+We want to rebase on top of our first commit, so we change the other two commits
+to `squash`:
+
+    pick eb36612 initial commit
+    squash 9ac8968 second commit
+    squash a760569 final commit
+
+After this, should you wish to update your commit message to better summarise
+your whole pull request, run:
+
+    git commit --amend
+
+You will then need to force push (assuming your initial commit(s) were posted
+to GitHub):
+
+    git push origin your-branch --force
+
+Alternatively, a core member can squash your commits within GitHub.
+
+## DCO Signoff
+
+Make sure to sign the [Developer Certificate of
+Origin](https://git-scm.com/docs/git-commit#Documentation/git-commit.txt---signoff).
+
+## Code of Conduct
+
+Rekor adheres to and enforces the [Contributor Covenant](http://contributor-covenant.org/version/1/4/) Code of Conduct.
+Please take a moment to read the [CODE_OF_CONDUCT.md](https://github.com/sigstore/rekor/blob/master/CODE_OF_CONDUCT.md) document.
+
diff --git a/vendor/github.com/sigstore/rekor/COPYRIGHT.txt b/vendor/github.com/sigstore/rekor/COPYRIGHT.txt
new file mode 100644
index 00000000000..7a01c849864
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/COPYRIGHT.txt
@@ -0,0 +1,14 @@
+
+Copyright 2021 The Sigstore Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/sigstore/rekor/LICENSE b/vendor/github.com/sigstore/rekor/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_parameters.go new file mode 100644 index 00000000000..481fa2bda5e --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_parameters.go @@ -0,0 +1,164 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// NewCreateLogEntryParams creates a new CreateLogEntryParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateLogEntryParams() *CreateLogEntryParams { + return &CreateLogEntryParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateLogEntryParamsWithTimeout creates a new CreateLogEntryParams object +// with the ability to set a timeout on a request. +func NewCreateLogEntryParamsWithTimeout(timeout time.Duration) *CreateLogEntryParams { + return &CreateLogEntryParams{ + timeout: timeout, + } +} + +// NewCreateLogEntryParamsWithContext creates a new CreateLogEntryParams object +// with the ability to set a context for a request. +func NewCreateLogEntryParamsWithContext(ctx context.Context) *CreateLogEntryParams { + return &CreateLogEntryParams{ + Context: ctx, + } +} + +// NewCreateLogEntryParamsWithHTTPClient creates a new CreateLogEntryParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateLogEntryParamsWithHTTPClient(client *http.Client) *CreateLogEntryParams { + return &CreateLogEntryParams{ + HTTPClient: client, + } +} + +/* +CreateLogEntryParams contains all the parameters to send to the API endpoint + + for the create log entry operation. + + Typically these are written to a http.Request. +*/ +type CreateLogEntryParams struct { + + // ProposedEntry. + ProposedEntry models.ProposedEntry + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create log entry params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateLogEntryParams) WithDefaults() *CreateLogEntryParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create log entry params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateLogEntryParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create log entry params +func (o *CreateLogEntryParams) WithTimeout(timeout time.Duration) *CreateLogEntryParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create log entry params +func (o *CreateLogEntryParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create log entry params +func (o *CreateLogEntryParams) WithContext(ctx context.Context) *CreateLogEntryParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create log entry params +func (o *CreateLogEntryParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create log entry params +func (o *CreateLogEntryParams) WithHTTPClient(client *http.Client) *CreateLogEntryParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create log entry params +func (o *CreateLogEntryParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithProposedEntry adds the proposedEntry to the create log entry params +func (o *CreateLogEntryParams) WithProposedEntry(proposedEntry models.ProposedEntry) *CreateLogEntryParams { + o.SetProposedEntry(proposedEntry) + return o +} + +// SetProposedEntry adds the proposedEntry to the create log entry params +func (o *CreateLogEntryParams) SetProposedEntry(proposedEntry models.ProposedEntry) { + o.ProposedEntry = proposedEntry +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateLogEntryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if err := r.SetBodyParam(o.ProposedEntry); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_responses.go new file mode 100644 index 00000000000..de665ed9cb5 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_responses.go @@ -0,0 +1,397 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// CreateLogEntryReader is a Reader for the CreateLogEntry structure. 
+type CreateLogEntryReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CreateLogEntryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 201: + result := NewCreateLogEntryCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateLogEntryBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 409: + result := NewCreateLogEntryConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + result := NewCreateLogEntryDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCreateLogEntryCreated creates a CreateLogEntryCreated with default headers values +func NewCreateLogEntryCreated() *CreateLogEntryCreated { + return &CreateLogEntryCreated{} +} + +/* +CreateLogEntryCreated describes a response with status code 201, with default header values. + +Returns the entry created in the transparency log +*/ +type CreateLogEntryCreated struct { + + /* UUID of log entry + */ + ETag string + + /* URI location of log entry + + Format: uri + */ + Location strfmt.URI + + Payload models.LogEntry +} + +// IsSuccess returns true when this create log entry created response has a 2xx status code +func (o *CreateLogEntryCreated) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create log entry created response has a 3xx status code +func (o *CreateLogEntryCreated) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create log entry created response has a 4xx status code +func (o *CreateLogEntryCreated) IsClientError() bool { + return false +} + +// IsServerError returns true when this create log entry created response has a 5xx status code +func (o *CreateLogEntryCreated) IsServerError() bool { + return false +} + +// IsCode returns true when this create log entry created response a status code equal to that given +func (o *CreateLogEntryCreated) IsCode(code int) bool { + return code == 201 +} + +// Code gets the status code for the create log entry created response +func (o *CreateLogEntryCreated) Code() int { + return 201 +} + +func (o *CreateLogEntryCreated) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %s", 201, payload) +} + +func (o *CreateLogEntryCreated) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %s", 201, payload) +} + +func (o *CreateLogEntryCreated) GetPayload() models.LogEntry { + return o.Payload +} + +func (o *CreateLogEntryCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // hydrates response header ETag + hdrETag := response.GetHeader("ETag") + + if hdrETag != "" { + o.ETag = hdrETag + } + + // hydrates response header Location + hdrLocation := response.GetHeader("Location") + + if hdrLocation != "" { + vallocation, err := formats.Parse("uri", hdrLocation) + if err != nil { + return errors.InvalidType("Location", "header", 
"strfmt.URI", hdrLocation) + } + o.Location = *(vallocation.(*strfmt.URI)) + } + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateLogEntryBadRequest creates a CreateLogEntryBadRequest with default headers values +func NewCreateLogEntryBadRequest() *CreateLogEntryBadRequest { + return &CreateLogEntryBadRequest{} +} + +/* +CreateLogEntryBadRequest describes a response with status code 400, with default header values. + +The content supplied to the server was invalid +*/ +type CreateLogEntryBadRequest struct { + Payload *models.Error +} + +// IsSuccess returns true when this create log entry bad request response has a 2xx status code +func (o *CreateLogEntryBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create log entry bad request response has a 3xx status code +func (o *CreateLogEntryBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create log entry bad request response has a 4xx status code +func (o *CreateLogEntryBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create log entry bad request response has a 5xx status code +func (o *CreateLogEntryBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create log entry bad request response a status code equal to that given +func (o *CreateLogEntryBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create log entry bad request response +func (o *CreateLogEntryBadRequest) Code() int { + return 400 +} + +func (o *CreateLogEntryBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %s", 400, payload) +} + +func (o *CreateLogEntryBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %s", 400, payload) +} + +func (o *CreateLogEntryBadRequest) GetPayload() *models.Error { + return o.Payload +} + +func (o *CreateLogEntryBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateLogEntryConflict creates a CreateLogEntryConflict with default headers values +func NewCreateLogEntryConflict() *CreateLogEntryConflict { + return &CreateLogEntryConflict{} +} + +/* +CreateLogEntryConflict describes a response with status code 409, with default header values. 
+ +The request conflicts with the current state of the transparency log +*/ +type CreateLogEntryConflict struct { + Location strfmt.URI + + Payload *models.Error +} + +// IsSuccess returns true when this create log entry conflict response has a 2xx status code +func (o *CreateLogEntryConflict) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create log entry conflict response has a 3xx status code +func (o *CreateLogEntryConflict) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create log entry conflict response has a 4xx status code +func (o *CreateLogEntryConflict) IsClientError() bool { + return true +} + +// IsServerError returns true when this create log entry conflict response has a 5xx status code +func (o *CreateLogEntryConflict) IsServerError() bool { + return false +} + +// IsCode returns true when this create log entry conflict response a status code equal to that given +func (o *CreateLogEntryConflict) IsCode(code int) bool { + return code == 409 +} + +// Code gets the status code for the create log entry conflict response +func (o *CreateLogEntryConflict) Code() int { + return 409 +} + +func (o *CreateLogEntryConflict) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %s", 409, payload) +} + +func (o *CreateLogEntryConflict) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %s", 409, payload) +} + +func (o *CreateLogEntryConflict) GetPayload() *models.Error { + return o.Payload +} + +func (o *CreateLogEntryConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // hydrates response header Location + hdrLocation := response.GetHeader("Location") + + if hdrLocation != "" { + vallocation, err := formats.Parse("uri", hdrLocation) + if err != nil { + return errors.InvalidType("Location", "header", "strfmt.URI", hdrLocation) + } + o.Location = *(vallocation.(*strfmt.URI)) + } + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewCreateLogEntryDefault creates a CreateLogEntryDefault with default headers values +func NewCreateLogEntryDefault(code int) *CreateLogEntryDefault { + return &CreateLogEntryDefault{ + _statusCode: code, + } +} + +/* +CreateLogEntryDefault describes a response with status code -1, with default header values. 
+ +There was an internal error in the server while processing the request +*/ +type CreateLogEntryDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this create log entry default response has a 2xx status code +func (o *CreateLogEntryDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this create log entry default response has a 3xx status code +func (o *CreateLogEntryDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this create log entry default response has a 4xx status code +func (o *CreateLogEntryDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this create log entry default response has a 5xx status code +func (o *CreateLogEntryDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this create log entry default response a status code equal to that given +func (o *CreateLogEntryDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the create log entry default response +func (o *CreateLogEntryDefault) Code() int { + return o._statusCode +} + +func (o *CreateLogEntryDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %s", o._statusCode, payload) +} + +func (o *CreateLogEntryDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %s", o._statusCode, payload) +} + +func (o *CreateLogEntryDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *CreateLogEntryDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go new file mode 100644 index 00000000000..f893db3dc97 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go @@ -0,0 +1,259 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// New creates a new entries API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +// New creates a new entries API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new entries API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + +/* +Client for entries API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption may be used to customize the behavior of Client methods. +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + CreateLogEntry(params *CreateLogEntryParams, opts ...ClientOption) (*CreateLogEntryCreated, error) + + GetLogEntryByIndex(params *GetLogEntryByIndexParams, opts ...ClientOption) (*GetLogEntryByIndexOK, error) + + GetLogEntryByUUID(params *GetLogEntryByUUIDParams, opts ...ClientOption) (*GetLogEntryByUUIDOK, error) + + SearchLogQuery(params *SearchLogQueryParams, opts ...ClientOption) (*SearchLogQueryOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +CreateLogEntry creates an entry in the transparency log + +Creates an entry in the transparency log for a detached signature, public key, and content. Items can be included in the request or fetched by the server when URLs are specified. +*/ +func (a *Client) CreateLogEntry(params *CreateLogEntryParams, opts ...ClientOption) (*CreateLogEntryCreated, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewCreateLogEntryParams() + } + op := &runtime.ClientOperation{ + ID: "createLogEntry", + Method: "POST", + PathPattern: "/api/v1/log/entries", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CreateLogEntryReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*CreateLogEntryCreated) + if ok { + return success, nil + } + + // unexpected success response. 
+ // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*CreateLogEntryDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +GetLogEntryByIndex retrieves an entry and inclusion proof from the transparency log if it exists by index +*/ +func (a *Client) GetLogEntryByIndex(params *GetLogEntryByIndexParams, opts ...ClientOption) (*GetLogEntryByIndexOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewGetLogEntryByIndexParams() + } + op := &runtime.ClientOperation{ + ID: "getLogEntryByIndex", + Method: "GET", + PathPattern: "/api/v1/log/entries", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetLogEntryByIndexReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*GetLogEntryByIndexOK) + if ok { + return success, nil + } + + // unexpected success response. + // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*GetLogEntryByIndexDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +GetLogEntryByUUID gets log entry and information required to generate an inclusion proof for the entry in the transparency log + +Returns the entry, root hash, tree size, and a list of hashes that can be used to calculate proof of an entry being included in the transparency log +*/ +func (a *Client) GetLogEntryByUUID(params *GetLogEntryByUUIDParams, opts ...ClientOption) (*GetLogEntryByUUIDOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewGetLogEntryByUUIDParams() + } + op := &runtime.ClientOperation{ + ID: "getLogEntryByUUID", + Method: "GET", + PathPattern: "/api/v1/log/entries/{entryUUID}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetLogEntryByUUIDReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*GetLogEntryByUUIDOK) + if ok { + return success, nil + } + + // unexpected success response. 
+ // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*GetLogEntryByUUIDDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +SearchLogQuery searches transparency log for one or more log entries +*/ +func (a *Client) SearchLogQuery(params *SearchLogQueryParams, opts ...ClientOption) (*SearchLogQueryOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewSearchLogQueryParams() + } + op := &runtime.ClientOperation{ + ID: "searchLogQuery", + Method: "POST", + PathPattern: "/api/v1/log/entries/retrieve", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &SearchLogQueryReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*SearchLogQueryOK) + if ok { + return success, nil + } + + // unexpected success response. + // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*SearchLogQueryDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_parameters.go new file mode 100644 index 00000000000..e2252275119 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_parameters.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetLogEntryByIndexParams creates a new GetLogEntryByIndexParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewGetLogEntryByIndexParams() *GetLogEntryByIndexParams { + return &GetLogEntryByIndexParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetLogEntryByIndexParamsWithTimeout creates a new GetLogEntryByIndexParams object +// with the ability to set a timeout on a request. +func NewGetLogEntryByIndexParamsWithTimeout(timeout time.Duration) *GetLogEntryByIndexParams { + return &GetLogEntryByIndexParams{ + timeout: timeout, + } +} + +// NewGetLogEntryByIndexParamsWithContext creates a new GetLogEntryByIndexParams object +// with the ability to set a context for a request. +func NewGetLogEntryByIndexParamsWithContext(ctx context.Context) *GetLogEntryByIndexParams { + return &GetLogEntryByIndexParams{ + Context: ctx, + } +} + +// NewGetLogEntryByIndexParamsWithHTTPClient creates a new GetLogEntryByIndexParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetLogEntryByIndexParamsWithHTTPClient(client *http.Client) *GetLogEntryByIndexParams { + return &GetLogEntryByIndexParams{ + HTTPClient: client, + } +} + +/* +GetLogEntryByIndexParams contains all the parameters to send to the API endpoint + + for the get log entry by index operation. + + Typically these are written to a http.Request. +*/ +type GetLogEntryByIndexParams struct { + + /* LogIndex. + + specifies the index of the entry in the transparency log to be retrieved + */ + LogIndex int64 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get log entry by index params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetLogEntryByIndexParams) WithDefaults() *GetLogEntryByIndexParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get log entry by index params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetLogEntryByIndexParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get log entry by index params +func (o *GetLogEntryByIndexParams) WithTimeout(timeout time.Duration) *GetLogEntryByIndexParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get log entry by index params +func (o *GetLogEntryByIndexParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get log entry by index params +func (o *GetLogEntryByIndexParams) WithContext(ctx context.Context) *GetLogEntryByIndexParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get log entry by index params +func (o *GetLogEntryByIndexParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get log entry by index params +func (o *GetLogEntryByIndexParams) WithHTTPClient(client *http.Client) *GetLogEntryByIndexParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get log entry by index params +func (o *GetLogEntryByIndexParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithLogIndex adds the logIndex to the get log entry by index params +func (o *GetLogEntryByIndexParams) WithLogIndex(logIndex int64) *GetLogEntryByIndexParams { + o.SetLogIndex(logIndex) + return o +} + +// SetLogIndex adds the logIndex to the get log entry by index params +func (o *GetLogEntryByIndexParams) SetLogIndex(logIndex int64) { + o.LogIndex = logIndex +} + +// WriteToRequest writes these params to a swagger request +func (o *GetLogEntryByIndexParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param logIndex + qrLogIndex := o.LogIndex + qLogIndex := swag.FormatInt64(qrLogIndex) + if qLogIndex != "" { + + if err := r.SetQueryParam("logIndex", qLogIndex); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_responses.go new file mode 100644 index 00000000000..40e17b3cfcd --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_responses.go @@ -0,0 +1,264 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// GetLogEntryByIndexReader is a Reader for the GetLogEntryByIndex structure. +type GetLogEntryByIndexReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetLogEntryByIndexReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewGetLogEntryByIndexOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 404: + result := NewGetLogEntryByIndexNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + result := NewGetLogEntryByIndexDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGetLogEntryByIndexOK creates a GetLogEntryByIndexOK with default headers values +func NewGetLogEntryByIndexOK() *GetLogEntryByIndexOK { + return &GetLogEntryByIndexOK{} +} + +/* +GetLogEntryByIndexOK describes a response with status code 200, with default header values. + +the entry in the transparency log requested along with an inclusion proof +*/ +type GetLogEntryByIndexOK struct { + Payload models.LogEntry +} + +// IsSuccess returns true when this get log entry by index o k response has a 2xx status code +func (o *GetLogEntryByIndexOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get log entry by index o k response has a 3xx status code +func (o *GetLogEntryByIndexOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get log entry by index o k response has a 4xx status code +func (o *GetLogEntryByIndexOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get log entry by index o k response has a 5xx status code +func (o *GetLogEntryByIndexOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get log entry by index o k response a status code equal to that given +func (o *GetLogEntryByIndexOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get log entry by index o k response +func (o *GetLogEntryByIndexOK) Code() int { + return 200 +} + +func (o *GetLogEntryByIndexOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %s", 200, payload) +} + +func (o *GetLogEntryByIndexOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %s", 200, payload) +} + +func (o *GetLogEntryByIndexOK) GetPayload() models.LogEntry { + return o.Payload +} + +func (o *GetLogEntryByIndexOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewGetLogEntryByIndexNotFound creates a GetLogEntryByIndexNotFound with default headers values +func 
NewGetLogEntryByIndexNotFound() *GetLogEntryByIndexNotFound { + return &GetLogEntryByIndexNotFound{} +} + +/* +GetLogEntryByIndexNotFound describes a response with status code 404, with default header values. + +The content requested could not be found +*/ +type GetLogEntryByIndexNotFound struct { +} + +// IsSuccess returns true when this get log entry by index not found response has a 2xx status code +func (o *GetLogEntryByIndexNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get log entry by index not found response has a 3xx status code +func (o *GetLogEntryByIndexNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get log entry by index not found response has a 4xx status code +func (o *GetLogEntryByIndexNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get log entry by index not found response has a 5xx status code +func (o *GetLogEntryByIndexNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get log entry by index not found response a status code equal to that given +func (o *GetLogEntryByIndexNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the get log entry by index not found response +func (o *GetLogEntryByIndexNotFound) Code() int { + return 404 +} + +func (o *GetLogEntryByIndexNotFound) Error() string { + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound", 404) +} + +func (o *GetLogEntryByIndexNotFound) String() string { + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound", 404) +} + +func (o *GetLogEntryByIndexNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetLogEntryByIndexDefault creates a GetLogEntryByIndexDefault with default headers values +func NewGetLogEntryByIndexDefault(code int) *GetLogEntryByIndexDefault { + return &GetLogEntryByIndexDefault{ + _statusCode: code, + } +} + +/* +GetLogEntryByIndexDefault describes a response with status code -1, with default header values. 
+ +There was an internal error in the server while processing the request +*/ +type GetLogEntryByIndexDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this get log entry by index default response has a 2xx status code +func (o *GetLogEntryByIndexDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this get log entry by index default response has a 3xx status code +func (o *GetLogEntryByIndexDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this get log entry by index default response has a 4xx status code +func (o *GetLogEntryByIndexDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this get log entry by index default response has a 5xx status code +func (o *GetLogEntryByIndexDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this get log entry by index default response a status code equal to that given +func (o *GetLogEntryByIndexDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the get log entry by index default response +func (o *GetLogEntryByIndexDefault) Code() int { + return o._statusCode +} + +func (o *GetLogEntryByIndexDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %s", o._statusCode, payload) +} + +func (o *GetLogEntryByIndexDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %s", o._statusCode, payload) +} + +func (o *GetLogEntryByIndexDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *GetLogEntryByIndexDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_parameters.go new file mode 100644 index 00000000000..5c88b526546 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_parameters.go @@ -0,0 +1,167 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetLogEntryByUUIDParams creates a new GetLogEntryByUUIDParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetLogEntryByUUIDParams() *GetLogEntryByUUIDParams { + return &GetLogEntryByUUIDParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetLogEntryByUUIDParamsWithTimeout creates a new GetLogEntryByUUIDParams object +// with the ability to set a timeout on a request. +func NewGetLogEntryByUUIDParamsWithTimeout(timeout time.Duration) *GetLogEntryByUUIDParams { + return &GetLogEntryByUUIDParams{ + timeout: timeout, + } +} + +// NewGetLogEntryByUUIDParamsWithContext creates a new GetLogEntryByUUIDParams object +// with the ability to set a context for a request. +func NewGetLogEntryByUUIDParamsWithContext(ctx context.Context) *GetLogEntryByUUIDParams { + return &GetLogEntryByUUIDParams{ + Context: ctx, + } +} + +// NewGetLogEntryByUUIDParamsWithHTTPClient creates a new GetLogEntryByUUIDParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetLogEntryByUUIDParamsWithHTTPClient(client *http.Client) *GetLogEntryByUUIDParams { + return &GetLogEntryByUUIDParams{ + HTTPClient: client, + } +} + +/* +GetLogEntryByUUIDParams contains all the parameters to send to the API endpoint + + for the get log entry by UUID operation. + + Typically these are written to a http.Request. +*/ +type GetLogEntryByUUIDParams struct { + + /* EntryUUID. + + the UUID of the entry for which the inclusion proof information should be returned + */ + EntryUUID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get log entry by UUID params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetLogEntryByUUIDParams) WithDefaults() *GetLogEntryByUUIDParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get log entry by UUID params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetLogEntryByUUIDParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) WithTimeout(timeout time.Duration) *GetLogEntryByUUIDParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) WithContext(ctx context.Context) *GetLogEntryByUUIDParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) WithHTTPClient(client *http.Client) *GetLogEntryByUUIDParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithEntryUUID adds the entryUUID to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) WithEntryUUID(entryUUID string) *GetLogEntryByUUIDParams { + o.SetEntryUUID(entryUUID) + return o +} + +// SetEntryUUID adds the entryUuid to the get log entry by UUID params +func (o *GetLogEntryByUUIDParams) SetEntryUUID(entryUUID string) { + o.EntryUUID = entryUUID +} + +// WriteToRequest writes these params to a swagger request +func (o *GetLogEntryByUUIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param entryUUID + if err := r.SetPathParam("entryUUID", o.EntryUUID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_responses.go new file mode 100644 index 00000000000..3498e27287e --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_responses.go @@ -0,0 +1,264 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// GetLogEntryByUUIDReader is a Reader for the GetLogEntryByUUID structure. 
+type GetLogEntryByUUIDReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetLogEntryByUUIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewGetLogEntryByUUIDOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 404: + result := NewGetLogEntryByUUIDNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + result := NewGetLogEntryByUUIDDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGetLogEntryByUUIDOK creates a GetLogEntryByUUIDOK with default headers values +func NewGetLogEntryByUUIDOK() *GetLogEntryByUUIDOK { + return &GetLogEntryByUUIDOK{} +} + +/* +GetLogEntryByUUIDOK describes a response with status code 200, with default header values. + +Information needed for a client to compute the inclusion proof +*/ +type GetLogEntryByUUIDOK struct { + Payload models.LogEntry +} + +// IsSuccess returns true when this get log entry by Uuid o k response has a 2xx status code +func (o *GetLogEntryByUUIDOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get log entry by Uuid o k response has a 3xx status code +func (o *GetLogEntryByUUIDOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get log entry by Uuid o k response has a 4xx status code +func (o *GetLogEntryByUUIDOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get log entry by Uuid o k response has a 5xx status code +func (o *GetLogEntryByUUIDOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get log entry by Uuid o k response a status code equal to that given +func (o *GetLogEntryByUUIDOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get log entry by Uuid o k response +func (o *GetLogEntryByUUIDOK) Code() int { + return 200 +} + +func (o *GetLogEntryByUUIDOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %s", 200, payload) +} + +func (o *GetLogEntryByUUIDOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %s", 200, payload) +} + +func (o *GetLogEntryByUUIDOK) GetPayload() models.LogEntry { + return o.Payload +} + +func (o *GetLogEntryByUUIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewGetLogEntryByUUIDNotFound creates a GetLogEntryByUUIDNotFound with default headers values +func NewGetLogEntryByUUIDNotFound() *GetLogEntryByUUIDNotFound { + return &GetLogEntryByUUIDNotFound{} +} + +/* +GetLogEntryByUUIDNotFound describes a response with status code 404, with default header values. 
+ +The content requested could not be found +*/ +type GetLogEntryByUUIDNotFound struct { +} + +// IsSuccess returns true when this get log entry by Uuid not found response has a 2xx status code +func (o *GetLogEntryByUUIDNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get log entry by Uuid not found response has a 3xx status code +func (o *GetLogEntryByUUIDNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get log entry by Uuid not found response has a 4xx status code +func (o *GetLogEntryByUUIDNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get log entry by Uuid not found response has a 5xx status code +func (o *GetLogEntryByUUIDNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get log entry by Uuid not found response a status code equal to that given +func (o *GetLogEntryByUUIDNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the get log entry by Uuid not found response +func (o *GetLogEntryByUUIDNotFound) Code() int { + return 404 +} + +func (o *GetLogEntryByUUIDNotFound) Error() string { + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound", 404) +} + +func (o *GetLogEntryByUUIDNotFound) String() string { + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound", 404) +} + +func (o *GetLogEntryByUUIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetLogEntryByUUIDDefault creates a GetLogEntryByUUIDDefault with default headers values +func NewGetLogEntryByUUIDDefault(code int) *GetLogEntryByUUIDDefault { + return &GetLogEntryByUUIDDefault{ + _statusCode: code, + } +} + +/* +GetLogEntryByUUIDDefault describes a response with status code -1, with default header values. 
+ +There was an internal error in the server while processing the request +*/ +type GetLogEntryByUUIDDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this get log entry by UUID default response has a 2xx status code +func (o *GetLogEntryByUUIDDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this get log entry by UUID default response has a 3xx status code +func (o *GetLogEntryByUUIDDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this get log entry by UUID default response has a 4xx status code +func (o *GetLogEntryByUUIDDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this get log entry by UUID default response has a 5xx status code +func (o *GetLogEntryByUUIDDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this get log entry by UUID default response a status code equal to that given +func (o *GetLogEntryByUUIDDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the get log entry by UUID default response +func (o *GetLogEntryByUUIDDefault) Code() int { + return o._statusCode +} + +func (o *GetLogEntryByUUIDDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %s", o._statusCode, payload) +} + +func (o *GetLogEntryByUUIDDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %s", o._statusCode, payload) +} + +func (o *GetLogEntryByUUIDDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *GetLogEntryByUUIDDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_parameters.go new file mode 100644 index 00000000000..ed158ce23e3 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_parameters.go @@ -0,0 +1,166 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// NewSearchLogQueryParams creates a new SearchLogQueryParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewSearchLogQueryParams() *SearchLogQueryParams { + return &SearchLogQueryParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewSearchLogQueryParamsWithTimeout creates a new SearchLogQueryParams object +// with the ability to set a timeout on a request. +func NewSearchLogQueryParamsWithTimeout(timeout time.Duration) *SearchLogQueryParams { + return &SearchLogQueryParams{ + timeout: timeout, + } +} + +// NewSearchLogQueryParamsWithContext creates a new SearchLogQueryParams object +// with the ability to set a context for a request. +func NewSearchLogQueryParamsWithContext(ctx context.Context) *SearchLogQueryParams { + return &SearchLogQueryParams{ + Context: ctx, + } +} + +// NewSearchLogQueryParamsWithHTTPClient creates a new SearchLogQueryParams object +// with the ability to set a custom HTTPClient for a request. +func NewSearchLogQueryParamsWithHTTPClient(client *http.Client) *SearchLogQueryParams { + return &SearchLogQueryParams{ + HTTPClient: client, + } +} + +/* +SearchLogQueryParams contains all the parameters to send to the API endpoint + + for the search log query operation. + + Typically these are written to a http.Request. +*/ +type SearchLogQueryParams struct { + + // Entry. + Entry *models.SearchLogQuery + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the search log query params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *SearchLogQueryParams) WithDefaults() *SearchLogQueryParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the search log query params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *SearchLogQueryParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the search log query params +func (o *SearchLogQueryParams) WithTimeout(timeout time.Duration) *SearchLogQueryParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the search log query params +func (o *SearchLogQueryParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the search log query params +func (o *SearchLogQueryParams) WithContext(ctx context.Context) *SearchLogQueryParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the search log query params +func (o *SearchLogQueryParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the search log query params +func (o *SearchLogQueryParams) WithHTTPClient(client *http.Client) *SearchLogQueryParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the search log query params +func (o *SearchLogQueryParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithEntry adds the entry to the search log query params +func (o *SearchLogQueryParams) WithEntry(entry *models.SearchLogQuery) *SearchLogQueryParams { + o.SetEntry(entry) + return o +} + +// SetEntry adds the entry to the search log query params +func (o *SearchLogQueryParams) SetEntry(entry *models.SearchLogQuery) { + o.Entry = entry +} + +// WriteToRequest writes these params to a swagger request +func (o *SearchLogQueryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Entry != nil { + if err := r.SetBodyParam(o.Entry); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_responses.go new file mode 100644 index 00000000000..13d5ba27864 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_responses.go @@ -0,0 +1,354 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package entries + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// SearchLogQueryReader is a Reader for the SearchLogQuery structure. +type SearchLogQueryReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
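[Review note, not vendored content: ReadResponse, next, dispatches on the HTTP status code, returning 2xx results as values and everything else as a typed error value. A sketch of the resulting caller-side contract, again assuming the conventional SearchLogQuery method on the entries client; field names for models.SearchLogQuery live in the vendored models package and are left empty here.]

// error_handling_sketch.go: reviewer illustration only.
package main

import (
	"errors"
	"log"

	rekor "github.com/sigstore/rekor/pkg/generated/client"
	"github.com/sigstore/rekor/pkg/generated/client/entries"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	// An empty query is syntactically valid; populate models.SearchLogQuery as needed.
	params := entries.NewSearchLogQueryParams().WithEntry(&models.SearchLogQuery{})

	resp, err := rekor.Default.Entries.SearchLogQuery(params)
	if err != nil {
		// Non-2xx responses come back as typed errors, so errors.As can pick them apart.
		var badReq *entries.SearchLogQueryBadRequest
		if errors.As(err, &badReq) {
			log.Fatalf("server rejected the query: %v", badReq)
		}
		log.Fatal(err)
	}
	log.Printf("matched %d log entries", len(resp.GetPayload()))
}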
+func (o *SearchLogQueryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewSearchLogQueryOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewSearchLogQueryBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewSearchLogQueryUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + result := NewSearchLogQueryDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewSearchLogQueryOK creates a SearchLogQueryOK with default headers values +func NewSearchLogQueryOK() *SearchLogQueryOK { + return &SearchLogQueryOK{} +} + +/* +SearchLogQueryOK describes a response with status code 200, with default header values. + +Returns zero or more entries from the transparency log, according to how many were included in request query +*/ +type SearchLogQueryOK struct { + Payload []models.LogEntry +} + +// IsSuccess returns true when this search log query o k response has a 2xx status code +func (o *SearchLogQueryOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this search log query o k response has a 3xx status code +func (o *SearchLogQueryOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this search log query o k response has a 4xx status code +func (o *SearchLogQueryOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this search log query o k response has a 5xx status code +func (o *SearchLogQueryOK) IsServerError() bool { + return false +} + +// IsCode returns true when this search log query o k response a status code equal to that given +func (o *SearchLogQueryOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the search log query o k response +func (o *SearchLogQueryOK) Code() int { + return 200 +} + +func (o *SearchLogQueryOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %s", 200, payload) +} + +func (o *SearchLogQueryOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %s", 200, payload) +} + +func (o *SearchLogQueryOK) GetPayload() []models.LogEntry { + return o.Payload +} + +func (o *SearchLogQueryOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewSearchLogQueryBadRequest creates a SearchLogQueryBadRequest with default headers values +func NewSearchLogQueryBadRequest() *SearchLogQueryBadRequest { + return &SearchLogQueryBadRequest{} +} + +/* +SearchLogQueryBadRequest describes a response with status code 400, with default header values. 
+ +The content supplied to the server was invalid +*/ +type SearchLogQueryBadRequest struct { + Payload *models.Error +} + +// IsSuccess returns true when this search log query bad request response has a 2xx status code +func (o *SearchLogQueryBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this search log query bad request response has a 3xx status code +func (o *SearchLogQueryBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this search log query bad request response has a 4xx status code +func (o *SearchLogQueryBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this search log query bad request response has a 5xx status code +func (o *SearchLogQueryBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this search log query bad request response a status code equal to that given +func (o *SearchLogQueryBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the search log query bad request response +func (o *SearchLogQueryBadRequest) Code() int { + return 400 +} + +func (o *SearchLogQueryBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %s", 400, payload) +} + +func (o *SearchLogQueryBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %s", 400, payload) +} + +func (o *SearchLogQueryBadRequest) GetPayload() *models.Error { + return o.Payload +} + +func (o *SearchLogQueryBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewSearchLogQueryUnprocessableEntity creates a SearchLogQueryUnprocessableEntity with default headers values +func NewSearchLogQueryUnprocessableEntity() *SearchLogQueryUnprocessableEntity { + return &SearchLogQueryUnprocessableEntity{} +} + +/* +SearchLogQueryUnprocessableEntity describes a response with status code 422, with default header values. 
+ +The server understood the request but is unable to process the contained instructions +*/ +type SearchLogQueryUnprocessableEntity struct { + Payload *models.Error +} + +// IsSuccess returns true when this search log query unprocessable entity response has a 2xx status code +func (o *SearchLogQueryUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this search log query unprocessable entity response has a 3xx status code +func (o *SearchLogQueryUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this search log query unprocessable entity response has a 4xx status code +func (o *SearchLogQueryUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this search log query unprocessable entity response has a 5xx status code +func (o *SearchLogQueryUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this search log query unprocessable entity response a status code equal to that given +func (o *SearchLogQueryUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the search log query unprocessable entity response +func (o *SearchLogQueryUnprocessableEntity) Code() int { + return 422 +} + +func (o *SearchLogQueryUnprocessableEntity) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %s", 422, payload) +} + +func (o *SearchLogQueryUnprocessableEntity) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %s", 422, payload) +} + +func (o *SearchLogQueryUnprocessableEntity) GetPayload() *models.Error { + return o.Payload +} + +func (o *SearchLogQueryUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewSearchLogQueryDefault creates a SearchLogQueryDefault with default headers values +func NewSearchLogQueryDefault(code int) *SearchLogQueryDefault { + return &SearchLogQueryDefault{ + _statusCode: code, + } +} + +/* +SearchLogQueryDefault describes a response with status code -1, with default header values. 
+ +There was an internal error in the server while processing the request +*/ +type SearchLogQueryDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this search log query default response has a 2xx status code +func (o *SearchLogQueryDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this search log query default response has a 3xx status code +func (o *SearchLogQueryDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this search log query default response has a 4xx status code +func (o *SearchLogQueryDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this search log query default response has a 5xx status code +func (o *SearchLogQueryDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this search log query default response a status code equal to that given +func (o *SearchLogQueryDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the search log query default response +func (o *SearchLogQueryDefault) Code() int { + return o._statusCode +} + +func (o *SearchLogQueryDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %s", o._statusCode, payload) +} + +func (o *SearchLogQueryDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %s", o._statusCode, payload) +} + +func (o *SearchLogQueryDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *SearchLogQueryDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go new file mode 100644 index 00000000000..80db490317c --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go @@ -0,0 +1,127 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package index + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// New creates a new index API client. 
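[Review note, not vendored content: New, below, wires a sub-client directly over a transport; NewClientWithBasicAuth and NewClientWithBearerToken wrap the same two lines and additionally set transport.DefaultAuthentication. A sketch using the experimental SearchIndex operation defined further down; the Email field on models.SearchIndex is an assumption, so check the vendored models package before relying on it.]

// standalone_index_sketch.go: reviewer illustration only.
package main

import (
	"fmt"
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/sigstore/rekor/pkg/generated/client/index"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	// Sub-clients can be built directly over a transport, without the Rekor facade.
	transport := httptransport.New("rekor.sigstore.dev", "/", []string{"https"})
	indexClient := index.New(transport, strfmt.Default)

	query := &models.SearchIndex{Email: "dev@example.com"} // hypothetical field usage
	resp, err := indexClient.SearchIndex(index.NewSearchIndexParams().WithQuery(query))
	if err != nil {
		log.Fatal(err) // the endpoint is best effort and may return incomplete results
	}
	for _, uuid := range resp.GetPayload() { // payload is a []string of entry UUIDs
		fmt.Println(uuid)
	}
}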
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +// New creates a new index API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new index API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + +/* +Client for index API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption may be used to customize the behavior of Client methods. +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + SearchIndex(params *SearchIndexParams, opts ...ClientOption) (*SearchIndexOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* + SearchIndex searches index by entry metadata + + EXPERIMENTAL - this endpoint is offered as best effort only and may be changed or removed in future releases. + +The results returned from this endpoint may be incomplete. +*/ +func (a *Client) SearchIndex(params *SearchIndexParams, opts ...ClientOption) (*SearchIndexOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewSearchIndexParams() + } + op := &runtime.ClientOperation{ + ID: "searchIndex", + Method: "POST", + PathPattern: "/api/v1/index/retrieve", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &SearchIndexReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*SearchIndexOK) + if ok { + return success, nil + } + + // unexpected success response. 
+ // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*SearchIndexDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_parameters.go new file mode 100644 index 00000000000..c1694193ef9 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_parameters.go @@ -0,0 +1,166 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package index + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// NewSearchIndexParams creates a new SearchIndexParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewSearchIndexParams() *SearchIndexParams { + return &SearchIndexParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewSearchIndexParamsWithTimeout creates a new SearchIndexParams object +// with the ability to set a timeout on a request. +func NewSearchIndexParamsWithTimeout(timeout time.Duration) *SearchIndexParams { + return &SearchIndexParams{ + timeout: timeout, + } +} + +// NewSearchIndexParamsWithContext creates a new SearchIndexParams object +// with the ability to set a context for a request. +func NewSearchIndexParamsWithContext(ctx context.Context) *SearchIndexParams { + return &SearchIndexParams{ + Context: ctx, + } +} + +// NewSearchIndexParamsWithHTTPClient creates a new SearchIndexParams object +// with the ability to set a custom HTTPClient for a request. +func NewSearchIndexParamsWithHTTPClient(client *http.Client) *SearchIndexParams { + return &SearchIndexParams{ + HTTPClient: client, + } +} + +/* +SearchIndexParams contains all the parameters to send to the API endpoint + + for the search index operation. + + Typically these are written to a http.Request. +*/ +type SearchIndexParams struct { + + // Query. + Query *models.SearchIndex + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the search index params (not the query body). 
+// +// All values with no default are reset to their zero value. +func (o *SearchIndexParams) WithDefaults() *SearchIndexParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the search index params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *SearchIndexParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the search index params +func (o *SearchIndexParams) WithTimeout(timeout time.Duration) *SearchIndexParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the search index params +func (o *SearchIndexParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the search index params +func (o *SearchIndexParams) WithContext(ctx context.Context) *SearchIndexParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the search index params +func (o *SearchIndexParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the search index params +func (o *SearchIndexParams) WithHTTPClient(client *http.Client) *SearchIndexParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the search index params +func (o *SearchIndexParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithQuery adds the query to the search index params +func (o *SearchIndexParams) WithQuery(query *models.SearchIndex) *SearchIndexParams { + o.SetQuery(query) + return o +} + +// SetQuery adds the query to the search index params +func (o *SearchIndexParams) SetQuery(query *models.SearchIndex) { + o.Query = query +} + +// WriteToRequest writes these params to a swagger request +func (o *SearchIndexParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Query != nil { + if err := r.SetBodyParam(o.Query); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_responses.go new file mode 100644 index 00000000000..8c62eca17b1 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_responses.go @@ -0,0 +1,278 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package index + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// SearchIndexReader is a Reader for the SearchIndex structure. +type SearchIndexReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *SearchIndexReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewSearchIndexOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewSearchIndexBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + result := NewSearchIndexDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewSearchIndexOK creates a SearchIndexOK with default headers values +func NewSearchIndexOK() *SearchIndexOK { + return &SearchIndexOK{} +} + +/* +SearchIndexOK describes a response with status code 200, with default header values. + +Returns zero or more entry UUIDs from the transparency log based on search query +*/ +type SearchIndexOK struct { + Payload []string +} + +// IsSuccess returns true when this search index o k response has a 2xx status code +func (o *SearchIndexOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this search index o k response has a 3xx status code +func (o *SearchIndexOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this search index o k response has a 4xx status code +func (o *SearchIndexOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this search index o k response has a 5xx status code +func (o *SearchIndexOK) IsServerError() bool { + return false +} + +// IsCode returns true when this search index o k response a status code equal to that given +func (o *SearchIndexOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the search index o k response +func (o *SearchIndexOK) Code() int { + return 200 +} + +func (o *SearchIndexOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %s", 200, payload) +} + +func (o *SearchIndexOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %s", 200, payload) +} + +func (o *SearchIndexOK) GetPayload() []string { + return o.Payload +} + +func (o *SearchIndexOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewSearchIndexBadRequest creates a SearchIndexBadRequest with default headers values +func NewSearchIndexBadRequest() *SearchIndexBadRequest { + return &SearchIndexBadRequest{} +} + +/* +SearchIndexBadRequest describes a response with status code 400, with default header values. 
+ +The content supplied to the server was invalid +*/ +type SearchIndexBadRequest struct { + Payload *models.Error +} + +// IsSuccess returns true when this search index bad request response has a 2xx status code +func (o *SearchIndexBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this search index bad request response has a 3xx status code +func (o *SearchIndexBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this search index bad request response has a 4xx status code +func (o *SearchIndexBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this search index bad request response has a 5xx status code +func (o *SearchIndexBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this search index bad request response a status code equal to that given +func (o *SearchIndexBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the search index bad request response +func (o *SearchIndexBadRequest) Code() int { + return 400 +} + +func (o *SearchIndexBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %s", 400, payload) +} + +func (o *SearchIndexBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %s", 400, payload) +} + +func (o *SearchIndexBadRequest) GetPayload() *models.Error { + return o.Payload +} + +func (o *SearchIndexBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewSearchIndexDefault creates a SearchIndexDefault with default headers values +func NewSearchIndexDefault(code int) *SearchIndexDefault { + return &SearchIndexDefault{ + _statusCode: code, + } +} + +/* +SearchIndexDefault describes a response with status code -1, with default header values. 
+ +There was an internal error in the server while processing the request +*/ +type SearchIndexDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this search index default response has a 2xx status code +func (o *SearchIndexDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this search index default response has a 3xx status code +func (o *SearchIndexDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this search index default response has a 4xx status code +func (o *SearchIndexDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this search index default response has a 5xx status code +func (o *SearchIndexDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this search index default response a status code equal to that given +func (o *SearchIndexDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the search index default response +func (o *SearchIndexDefault) Code() int { + return o._statusCode +} + +func (o *SearchIndexDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %s", o._statusCode, payload) +} + +func (o *SearchIndexDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %s", o._statusCode, payload) +} + +func (o *SearchIndexDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *SearchIndexDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_parameters.go new file mode 100644 index 00000000000..b4248c933a7 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_parameters.go @@ -0,0 +1,179 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package pubkey + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetPublicKeyParams creates a new GetPublicKeyParams object, +// with the default timeout for this client. 
+// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetPublicKeyParams() *GetPublicKeyParams { + return &GetPublicKeyParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetPublicKeyParamsWithTimeout creates a new GetPublicKeyParams object +// with the ability to set a timeout on a request. +func NewGetPublicKeyParamsWithTimeout(timeout time.Duration) *GetPublicKeyParams { + return &GetPublicKeyParams{ + timeout: timeout, + } +} + +// NewGetPublicKeyParamsWithContext creates a new GetPublicKeyParams object +// with the ability to set a context for a request. +func NewGetPublicKeyParamsWithContext(ctx context.Context) *GetPublicKeyParams { + return &GetPublicKeyParams{ + Context: ctx, + } +} + +// NewGetPublicKeyParamsWithHTTPClient creates a new GetPublicKeyParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetPublicKeyParamsWithHTTPClient(client *http.Client) *GetPublicKeyParams { + return &GetPublicKeyParams{ + HTTPClient: client, + } +} + +/* +GetPublicKeyParams contains all the parameters to send to the API endpoint + + for the get public key operation. + + Typically these are written to a http.Request. +*/ +type GetPublicKeyParams struct { + + /* TreeID. + + The tree ID of the tree you wish to get a public key for + */ + TreeID *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get public key params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetPublicKeyParams) WithDefaults() *GetPublicKeyParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get public key params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetPublicKeyParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get public key params +func (o *GetPublicKeyParams) WithTimeout(timeout time.Duration) *GetPublicKeyParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get public key params +func (o *GetPublicKeyParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get public key params +func (o *GetPublicKeyParams) WithContext(ctx context.Context) *GetPublicKeyParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get public key params +func (o *GetPublicKeyParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get public key params +func (o *GetPublicKeyParams) WithHTTPClient(client *http.Client) *GetPublicKeyParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get public key params +func (o *GetPublicKeyParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithTreeID adds the treeID to the get public key params +func (o *GetPublicKeyParams) WithTreeID(treeID *string) *GetPublicKeyParams { + o.SetTreeID(treeID) + return o +} + +// SetTreeID adds the treeId to the get public key params +func (o *GetPublicKeyParams) SetTreeID(treeID *string) { + o.TreeID = treeID +} + +// WriteToRequest writes these params to a swagger request +func (o *GetPublicKeyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.TreeID != nil { + + // query param treeID + var qrTreeID string + + if o.TreeID != nil { + qrTreeID = *o.TreeID + } + qTreeID := qrTreeID + if qTreeID != "" { + + if err := r.SetQueryParam("treeID", qTreeID); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_responses.go new file mode 100644 index 00000000000..70dbc452a3b --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_responses.go @@ -0,0 +1,202 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package pubkey + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// GetPublicKeyReader is a Reader for the GetPublicKey structure. 
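[Review note, not vendored content: the optional TreeID query parameter above selects a specific log shard; leaving it nil omits the parameter, and the server then presumably answers for the currently active shard. GetPublicKey itself is defined later in pubkey_client.go in this same diff.]

// pubkey_sketch.go: reviewer illustration only.
package main

import (
	"fmt"
	"log"

	rekor "github.com/sigstore/rekor/pkg/generated/client"
	"github.com/sigstore/rekor/pkg/generated/client/pubkey"
)

func main() {
	treeID := "1193050959916656506" // illustrative shard ID, not a real tree
	params := pubkey.NewGetPublicKeyParams().WithTreeID(&treeID)

	resp, err := rekor.Default.Pubkey.GetPublicKey(params)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(resp.GetPayload()) // the PEM-encoded public key, as a plain string
}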
+type GetPublicKeyReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetPublicKeyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewGetPublicKeyOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewGetPublicKeyDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGetPublicKeyOK creates a GetPublicKeyOK with default headers values +func NewGetPublicKeyOK() *GetPublicKeyOK { + return &GetPublicKeyOK{} +} + +/* +GetPublicKeyOK describes a response with status code 200, with default header values. + +The public key +*/ +type GetPublicKeyOK struct { + Payload string +} + +// IsSuccess returns true when this get public key o k response has a 2xx status code +func (o *GetPublicKeyOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get public key o k response has a 3xx status code +func (o *GetPublicKeyOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get public key o k response has a 4xx status code +func (o *GetPublicKeyOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get public key o k response has a 5xx status code +func (o *GetPublicKeyOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get public key o k response a status code equal to that given +func (o *GetPublicKeyOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get public key o k response +func (o *GetPublicKeyOK) Code() int { + return 200 +} + +func (o *GetPublicKeyOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %s", 200, payload) +} + +func (o *GetPublicKeyOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %s", 200, payload) +} + +func (o *GetPublicKeyOK) GetPayload() string { + return o.Payload +} + +func (o *GetPublicKeyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewGetPublicKeyDefault creates a GetPublicKeyDefault with default headers values +func NewGetPublicKeyDefault(code int) *GetPublicKeyDefault { + return &GetPublicKeyDefault{ + _statusCode: code, + } +} + +/* +GetPublicKeyDefault describes a response with status code -1, with default header values. 
+ +There was an internal error in the server while processing the request +*/ +type GetPublicKeyDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this get public key default response has a 2xx status code +func (o *GetPublicKeyDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this get public key default response has a 3xx status code +func (o *GetPublicKeyDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this get public key default response has a 4xx status code +func (o *GetPublicKeyDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this get public key default response has a 5xx status code +func (o *GetPublicKeyDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this get public key default response a status code equal to that given +func (o *GetPublicKeyDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the get public key default response +func (o *GetPublicKeyDefault) Code() int { + return o._statusCode +} + +func (o *GetPublicKeyDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %s", o._statusCode, payload) +} + +func (o *GetPublicKeyDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %s", o._statusCode, payload) +} + +func (o *GetPublicKeyDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *GetPublicKeyDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go new file mode 100644 index 00000000000..7f4db7d8cf1 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go @@ -0,0 +1,149 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package pubkey + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// New creates a new pubkey API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +// New creates a new pubkey API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new pubkey API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + +/* +Client for pubkey API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption may be used to customize the behavior of Client methods. +type ClientOption func(*runtime.ClientOperation) + +// This client is generated with a few options you might find useful for your swagger spec. +// +// Feel free to add you own set of options. + +// WithAccept allows the client to force the Accept header +// to negotiate a specific Producer from the server. +// +// You may use this option to set arbitrary extensions to your MIME media type. +func WithAccept(mime string) ClientOption { + return func(r *runtime.ClientOperation) { + r.ProducesMediaTypes = []string{mime} + } +} + +// WithAcceptApplicationJSON sets the Accept header to "application/json". +func WithAcceptApplicationJSON(r *runtime.ClientOperation) { + r.ProducesMediaTypes = []string{"application/json"} +} + +// WithAcceptApplicationxPemFile sets the Accept header to "application/x-pem-file". 
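[Review note, not vendored content: ClientOption values are applied per call, so Accept negotiation can happen at the call site without touching the transport. Both option constructors used here are the ones declared just above and below.]

// option_sketch.go: reviewer illustration, reusing the imports from the previous sketch.
// A nil params value is fine: GetPublicKey substitutes NewGetPublicKeyParams() itself.
resp, err := rekor.Default.Pubkey.GetPublicKey(nil, pubkey.WithAcceptApplicationxPemFile)
// equivalently: rekor.Default.Pubkey.GetPublicKey(nil, pubkey.WithAccept("application/x-pem-file"))
_, _ = resp, err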
+func WithAcceptApplicationxPemFile(r *runtime.ClientOperation) { + r.ProducesMediaTypes = []string{"application/x-pem-file"} +} + +// ClientService is the interface for Client methods +type ClientService interface { + GetPublicKey(params *GetPublicKeyParams, opts ...ClientOption) (*GetPublicKeyOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +GetPublicKey retrieves the public key that can be used to validate the signed tree head + +Returns the public key that can be used to validate the signed tree head +*/ +func (a *Client) GetPublicKey(params *GetPublicKeyParams, opts ...ClientOption) (*GetPublicKeyOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewGetPublicKeyParams() + } + op := &runtime.ClientOperation{ + ID: "getPublicKey", + Method: "GET", + PathPattern: "/api/v1/log/publicKey", + ProducesMediaTypes: []string{"application/x-pem-file"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetPublicKeyReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*GetPublicKeyOK) + if ok { + return success, nil + } + + // unexpected success response. + // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*GetPublicKeyDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/rekor_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/rekor_client.go new file mode 100644 index 00000000000..bee3811184a --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/rekor_client.go @@ -0,0 +1,143 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package client + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/client/entries" + "github.com/sigstore/rekor/pkg/generated/client/index" + "github.com/sigstore/rekor/pkg/generated/client/pubkey" + "github.com/sigstore/rekor/pkg/generated/client/tlog" +) + +// Default rekor HTTP client. 
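[Review note, not vendored content: rekor_client.go, just below, is the facade that ties the four sub-clients together. A sketch of pointing it at a private instance via TransportConfig; the host name is hypothetical.]

// facade_sketch.go: reviewer illustration only.
package main

import "github.com/sigstore/rekor/pkg/generated/client"

func main() {
	cfg := client.DefaultTransportConfig().
		WithHost("rekor.internal.example").
		WithSchemes([]string{"https"}) // the spec default is plain "http"
	rc := client.NewHTTPClientWithConfig(nil, cfg) // nil registry falls back to strfmt.Default
	_ = rc.Entries                                 // Index, Pubkey and Tlog hang off the same facade
}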
+var Default = NewHTTPClient(nil) + +const ( + // DefaultHost is the default Host + // found in Meta (info) section of spec file + DefaultHost string = "rekor.sigstore.dev" + // DefaultBasePath is the default BasePath + // found in Meta (info) section of spec file + DefaultBasePath string = "/" +) + +// DefaultSchemes are the default schemes found in Meta (info) section of spec file +var DefaultSchemes = []string{"http"} + +// NewHTTPClient creates a new rekor HTTP client. +func NewHTTPClient(formats strfmt.Registry) *Rekor { + return NewHTTPClientWithConfig(formats, nil) +} + +// NewHTTPClientWithConfig creates a new rekor HTTP client, +// using a customizable transport config. +func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *Rekor { + // ensure nullable parameters have default + if cfg == nil { + cfg = DefaultTransportConfig() + } + + // create transport and client + transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes) + return New(transport, formats) +} + +// New creates a new rekor client +func New(transport runtime.ClientTransport, formats strfmt.Registry) *Rekor { + // ensure nullable parameters have default + if formats == nil { + formats = strfmt.Default + } + + cli := new(Rekor) + cli.Transport = transport + cli.Entries = entries.New(transport, formats) + cli.Index = index.New(transport, formats) + cli.Pubkey = pubkey.New(transport, formats) + cli.Tlog = tlog.New(transport, formats) + return cli +} + +// DefaultTransportConfig creates a TransportConfig with the +// default settings taken from the meta section of the spec file. +func DefaultTransportConfig() *TransportConfig { + return &TransportConfig{ + Host: DefaultHost, + BasePath: DefaultBasePath, + Schemes: DefaultSchemes, + } +} + +// TransportConfig contains the transport related info, +// found in the meta section of the spec file. +type TransportConfig struct { + Host string + BasePath string + Schemes []string +} + +// WithHost overrides the default host, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithHost(host string) *TransportConfig { + cfg.Host = host + return cfg +} + +// WithBasePath overrides the default basePath, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig { + cfg.BasePath = basePath + return cfg +} + +// WithSchemes overrides the default schemes, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig { + cfg.Schemes = schemes + return cfg +} + +// Rekor is a client for rekor +type Rekor struct { + Entries entries.ClientService + + Index index.ClientService + + Pubkey pubkey.ClientService + + Tlog tlog.ClientService + + Transport runtime.ClientTransport +} + +// SetTransport changes the transport on the client and all its subresources +func (c *Rekor) SetTransport(transport runtime.ClientTransport) { + c.Transport = transport + c.Entries.SetTransport(transport) + c.Index.SetTransport(transport) + c.Pubkey.SetTransport(transport) + c.Tlog.SetTransport(transport) +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go new file mode 100644 index 00000000000..e0ae2cdd31e --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go @@ -0,0 +1,144 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tlog + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetLogInfoParams creates a new GetLogInfoParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameters, use SetDefaults or WithDefaults. +func NewGetLogInfoParams() *GetLogInfoParams { + return &GetLogInfoParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetLogInfoParamsWithTimeout creates a new GetLogInfoParams object +// with the ability to set a timeout on a request. +func NewGetLogInfoParamsWithTimeout(timeout time.Duration) *GetLogInfoParams { + return &GetLogInfoParams{ + timeout: timeout, + } +} + +// NewGetLogInfoParamsWithContext creates a new GetLogInfoParams object +// with the ability to set a context for a request. +func NewGetLogInfoParamsWithContext(ctx context.Context) *GetLogInfoParams { + return &GetLogInfoParams{ + Context: ctx, + } +} + +// NewGetLogInfoParamsWithHTTPClient creates a new GetLogInfoParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetLogInfoParamsWithHTTPClient(client *http.Client) *GetLogInfoParams { + return &GetLogInfoParams{ + HTTPClient: client, + } +} + +/* +GetLogInfoParams contains all the parameters to send to the API endpoint + + for the get log info operation. + + Typically these are written to a http.Request. +*/ +type GetLogInfoParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get log info params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetLogInfoParams) WithDefaults() *GetLogInfoParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get log info params (not the query body). +// +// All values with no default are reset to their zero value.
+func (o *GetLogInfoParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get log info params +func (o *GetLogInfoParams) WithTimeout(timeout time.Duration) *GetLogInfoParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get log info params +func (o *GetLogInfoParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get log info params +func (o *GetLogInfoParams) WithContext(ctx context.Context) *GetLogInfoParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get log info params +func (o *GetLogInfoParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get log info params +func (o *GetLogInfoParams) WithHTTPClient(client *http.Client) *GetLogInfoParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get log info params +func (o *GetLogInfoParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *GetLogInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_responses.go new file mode 100644 index 00000000000..3d98f88cdfd --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_responses.go @@ -0,0 +1,204 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tlog + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// GetLogInfoReader is a Reader for the GetLogInfo structure. +type GetLogInfoReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetLogInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewGetLogInfoOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewGetLogInfoDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGetLogInfoOK creates a GetLogInfoOK with default headers values +func NewGetLogInfoOK() *GetLogInfoOK { + return &GetLogInfoOK{} +} + +/* +GetLogInfoOK describes a response with status code 200, with default header values. + +A JSON object with the root hash and tree size as properties +*/ +type GetLogInfoOK struct { + Payload *models.LogInfo +} + +// IsSuccess returns true when this get log info o k response has a 2xx status code +func (o *GetLogInfoOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get log info o k response has a 3xx status code +func (o *GetLogInfoOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get log info o k response has a 4xx status code +func (o *GetLogInfoOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get log info o k response has a 5xx status code +func (o *GetLogInfoOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get log info o k response has a status code equal to that given +func (o *GetLogInfoOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get log info o k response +func (o *GetLogInfoOK) Code() int { + return 200 +} + +func (o *GetLogInfoOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %s", 200, payload) +} + +func (o *GetLogInfoOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %s", 200, payload) +} + +func (o *GetLogInfoOK) GetPayload() *models.LogInfo { + return o.Payload +} + +func (o *GetLogInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.LogInfo) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewGetLogInfoDefault creates a GetLogInfoDefault with default headers values +func NewGetLogInfoDefault(code int) *GetLogInfoDefault { + return &GetLogInfoDefault{ + _statusCode: code, + } +} + +/* +GetLogInfoDefault describes a response with status code -1, with default header values.
+ +There was an internal error in the server while processing the request +*/ +type GetLogInfoDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this get log info default response has a 2xx status code +func (o *GetLogInfoDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this get log info default response has a 3xx status code +func (o *GetLogInfoDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this get log info default response has a 4xx status code +func (o *GetLogInfoDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this get log info default response has a 5xx status code +func (o *GetLogInfoDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this get log info default response has a status code equal to that given +func (o *GetLogInfoDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the get log info default response +func (o *GetLogInfoDefault) Code() int { + return o._statusCode +} + +func (o *GetLogInfoDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %s", o._statusCode, payload) +} + +func (o *GetLogInfoDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %s", o._statusCode, payload) +} + +func (o *GetLogInfoDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *GetLogInfoDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_parameters.go new file mode 100644 index 00000000000..2b21ad887c1 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_parameters.go @@ -0,0 +1,255 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tlog + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetLogProofParams creates a new GetLogProofParams object, +// with the default timeout for this client.
+// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameters, use SetDefaults or WithDefaults. +func NewGetLogProofParams() *GetLogProofParams { + return &GetLogProofParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetLogProofParamsWithTimeout creates a new GetLogProofParams object +// with the ability to set a timeout on a request. +func NewGetLogProofParamsWithTimeout(timeout time.Duration) *GetLogProofParams { + return &GetLogProofParams{ + timeout: timeout, + } +} + +// NewGetLogProofParamsWithContext creates a new GetLogProofParams object +// with the ability to set a context for a request. +func NewGetLogProofParamsWithContext(ctx context.Context) *GetLogProofParams { + return &GetLogProofParams{ + Context: ctx, + } +} + +// NewGetLogProofParamsWithHTTPClient creates a new GetLogProofParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetLogProofParamsWithHTTPClient(client *http.Client) *GetLogProofParams { + return &GetLogProofParams{ + HTTPClient: client, + } +} + +/* +GetLogProofParams contains all the parameters to send to the API endpoint + + for the get log proof operation. + + Typically these are written to a http.Request. +*/ +type GetLogProofParams struct { + + /* FirstSize. + + The size of the tree that you wish to prove consistency from (1 means the beginning of the log). Defaults to 1 if not specified + + + Default: 1 + */ + FirstSize *int64 + + /* LastSize. + + The size of the tree that you wish to prove consistency to + */ + LastSize int64 + + /* TreeID. + + The tree ID of the tree that you wish to prove consistency for + */ + TreeID *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get log proof params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetLogProofParams) WithDefaults() *GetLogProofParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get log proof params (not the query body). +// +// All values with no default are reset to their zero value.
+func (o *GetLogProofParams) SetDefaults() { + var ( + firstSizeDefault = int64(1) + ) + + val := GetLogProofParams{ + FirstSize: &firstSizeDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the get log proof params +func (o *GetLogProofParams) WithTimeout(timeout time.Duration) *GetLogProofParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get log proof params +func (o *GetLogProofParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get log proof params +func (o *GetLogProofParams) WithContext(ctx context.Context) *GetLogProofParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get log proof params +func (o *GetLogProofParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get log proof params +func (o *GetLogProofParams) WithHTTPClient(client *http.Client) *GetLogProofParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get log proof params +func (o *GetLogProofParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithFirstSize adds the firstSize to the get log proof params +func (o *GetLogProofParams) WithFirstSize(firstSize *int64) *GetLogProofParams { + o.SetFirstSize(firstSize) + return o +} + +// SetFirstSize adds the firstSize to the get log proof params +func (o *GetLogProofParams) SetFirstSize(firstSize *int64) { + o.FirstSize = firstSize +} + +// WithLastSize adds the lastSize to the get log proof params +func (o *GetLogProofParams) WithLastSize(lastSize int64) *GetLogProofParams { + o.SetLastSize(lastSize) + return o +} + +// SetLastSize adds the lastSize to the get log proof params +func (o *GetLogProofParams) SetLastSize(lastSize int64) { + o.LastSize = lastSize +} + +// WithTreeID adds the treeID to the get log proof params +func (o *GetLogProofParams) WithTreeID(treeID *string) *GetLogProofParams { + o.SetTreeID(treeID) + return o +} + +// SetTreeID adds the treeId to the get log proof params +func (o *GetLogProofParams) SetTreeID(treeID *string) { + o.TreeID = treeID +} + +// WriteToRequest writes these params to a swagger request +func (o *GetLogProofParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.FirstSize != nil { + + // query param firstSize + var qrFirstSize int64 + + if o.FirstSize != nil { + qrFirstSize = *o.FirstSize + } + qFirstSize := swag.FormatInt64(qrFirstSize) + if qFirstSize != "" { + + if err := r.SetQueryParam("firstSize", qFirstSize); err != nil { + return err + } + } + } + + // query param lastSize + qrLastSize := o.LastSize + qLastSize := swag.FormatInt64(qrLastSize) + if qLastSize != "" { + + if err := r.SetQueryParam("lastSize", qLastSize); err != nil { + return err + } + } + + if o.TreeID != nil { + + // query param treeID + var qrTreeID string + + if o.TreeID != nil { + qrTreeID = *o.TreeID + } + qTreeID := qrTreeID + if qTreeID != "" { + + if err := r.SetQueryParam("treeID", qTreeID); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_responses.go new file mode 100644 index 00000000000..ae8e50d2879 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_responses.go @@ -0,0 +1,280 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tlog + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + stderrors "errors" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// GetLogProofReader is a Reader for the GetLogProof structure. +type GetLogProofReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetLogProofReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) { + switch response.Code() { + case 200: + result := NewGetLogProofOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetLogProofBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + result := NewGetLogProofDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGetLogProofOK creates a GetLogProofOK with default headers values +func NewGetLogProofOK() *GetLogProofOK { + return &GetLogProofOK{} +} + +/* +GetLogProofOK describes a response with status code 200, with default header values. 
+ +All hashes required to compute the consistency proof +*/ +type GetLogProofOK struct { + Payload *models.ConsistencyProof +} + +// IsSuccess returns true when this get log proof o k response has a 2xx status code +func (o *GetLogProofOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get log proof o k response has a 3xx status code +func (o *GetLogProofOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get log proof o k response has a 4xx status code +func (o *GetLogProofOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get log proof o k response has a 5xx status code +func (o *GetLogProofOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get log proof o k response has a status code equal to that given +func (o *GetLogProofOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get log proof o k response +func (o *GetLogProofOK) Code() int { + return 200 +} + +func (o *GetLogProofOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %s", 200, payload) +} + +func (o *GetLogProofOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %s", 200, payload) +} + +func (o *GetLogProofOK) GetPayload() *models.ConsistencyProof { + return o.Payload +} + +func (o *GetLogProofOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ConsistencyProof) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewGetLogProofBadRequest creates a GetLogProofBadRequest with default headers values +func NewGetLogProofBadRequest() *GetLogProofBadRequest { + return &GetLogProofBadRequest{} +} + +/* +GetLogProofBadRequest describes a response with status code 400, with default header values.
+ +The content supplied to the server was invalid +*/ +type GetLogProofBadRequest struct { + Payload *models.Error +} + +// IsSuccess returns true when this get log proof bad request response has a 2xx status code +func (o *GetLogProofBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get log proof bad request response has a 3xx status code +func (o *GetLogProofBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get log proof bad request response has a 4xx status code +func (o *GetLogProofBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get log proof bad request response has a 5xx status code +func (o *GetLogProofBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get log proof bad request response has a status code equal to that given +func (o *GetLogProofBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the get log proof bad request response +func (o *GetLogProofBadRequest) Code() int { + return 400 +} + +func (o *GetLogProofBadRequest) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %s", 400, payload) +} + +func (o *GetLogProofBadRequest) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %s", 400, payload) +} + +func (o *GetLogProofBadRequest) GetPayload() *models.Error { + return o.Payload +} + +func (o *GetLogProofBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} + +// NewGetLogProofDefault creates a GetLogProofDefault with default headers values +func NewGetLogProofDefault(code int) *GetLogProofDefault { + return &GetLogProofDefault{ + _statusCode: code, + } +} + +/* +GetLogProofDefault describes a response with status code -1, with default header values.
+ +There was an internal error in the server while processing the request +*/ +type GetLogProofDefault struct { + _statusCode int + + Payload *models.Error +} + +// IsSuccess returns true when this get log proof default response has a 2xx status code +func (o *GetLogProofDefault) IsSuccess() bool { + return o._statusCode/100 == 2 +} + +// IsRedirect returns true when this get log proof default response has a 3xx status code +func (o *GetLogProofDefault) IsRedirect() bool { + return o._statusCode/100 == 3 +} + +// IsClientError returns true when this get log proof default response has a 4xx status code +func (o *GetLogProofDefault) IsClientError() bool { + return o._statusCode/100 == 4 +} + +// IsServerError returns true when this get log proof default response has a 5xx status code +func (o *GetLogProofDefault) IsServerError() bool { + return o._statusCode/100 == 5 +} + +// IsCode returns true when this get log proof default response has a status code equal to that given +func (o *GetLogProofDefault) IsCode(code int) bool { + return o._statusCode == code +} + +// Code gets the status code for the get log proof default response +func (o *GetLogProofDefault) Code() int { + return o._statusCode +} + +func (o *GetLogProofDefault) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %s", o._statusCode, payload) +} + +func (o *GetLogProofDefault) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %s", o._statusCode, payload) +} + +func (o *GetLogProofDefault) GetPayload() *models.Error { + return o.Payload +} + +func (o *GetLogProofDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go new file mode 100644 index 00000000000..ff174ebfa23 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go @@ -0,0 +1,171 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tlog + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// New creates a new tlog API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +// NewClientWithBasicAuth creates a new tlog API client with basic auth credentials.
+// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// NewClientWithBearerToken creates a new tlog API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + +/* +Client for tlog API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption may be used to customize the behavior of Client methods. +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + GetLogInfo(params *GetLogInfoParams, opts ...ClientOption) (*GetLogInfoOK, error) + + GetLogProof(params *GetLogProofParams, opts ...ClientOption) (*GetLogProofOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +GetLogInfo gets information about the current state of the transparency log + +Returns the current root hash and size of the merkle tree used to store the log entries. +*/ +func (a *Client) GetLogInfo(params *GetLogInfoParams, opts ...ClientOption) (*GetLogInfoOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewGetLogInfoParams() + } + op := &runtime.ClientOperation{ + ID: "getLogInfo", + Method: "GET", + PathPattern: "/api/v1/log", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetLogInfoReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*GetLogInfoOK) + if ok { + return success, nil + } + + // unexpected success response.
+ // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*GetLogInfoDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +GetLogProof gets information required to generate a consistency proof for the transparency log + +Returns a list of hashes for specified tree sizes that can be used to confirm the consistency of the transparency log +*/ +func (a *Client) GetLogProof(params *GetLogProofParams, opts ...ClientOption) (*GetLogProofOK, error) { + // NOTE: parameters are not validated before sending + if params == nil { + params = NewGetLogProofParams() + } + op := &runtime.ClientOperation{ + ID: "getLogProof", + Method: "GET", + PathPattern: "/api/v1/log/proof", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetLogProofReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + + // only one success response has to be checked + success, ok := result.(*GetLogProofOK) + if ok { + return success, nil + } + + // unexpected success response. + // + // a default response is provided: fill this and return an error + unexpectedSuccess := result.(*GetLogProofDefault) + + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go new file mode 100644 index 00000000000..5607679fdf7 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Alpine Alpine package +// +// swagger:model alpine +type Alpine struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec AlpineSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Alpine) Kind() string { + return "alpine" +} + +// SetKind sets the kind of this subtype +func (m *Alpine) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Alpine) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec AlpineSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmarshalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Alpine + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Alpine) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec AlpineSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this alpine +func (m *Alpine) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...)
+ } + return nil +} + +func (m *Alpine) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Alpine) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this alpine based on the context it is used +func (m *Alpine) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Alpine) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Alpine) UnmarshalBinary(b []byte) error { + var res Alpine + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go new file mode 100644 index 00000000000..00f76926c39 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// AlpineSchema Alpine Package Schema +// +// # Schema for Alpine package objects +// +// swagger:model alpineSchema +type AlpineSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go new file mode 100644 index 00000000000..c77008ce7a8 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go @@ -0,0 +1,480 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// AlpineV001Schema Alpine v0.0.1 Schema +// +// # Schema for Alpine Package entries +// +// swagger:model alpineV001Schema +type AlpineV001Schema struct { + + // package + // Required: true + Package *AlpineV001SchemaPackage `json:"package"` + + // public key + // Required: true + PublicKey *AlpineV001SchemaPublicKey `json:"publicKey"` +} + +// Validate validates this alpine v001 schema +func (m *AlpineV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePackage(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AlpineV001Schema) validatePackage(formats strfmt.Registry) error { + + if err := validate.Required("package", "body", m.Package); err != nil { + return err + } + + if m.Package != nil { + if err := m.Package.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package") + } + + return err + } + } + + return nil +} + +func (m *AlpineV001Schema) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { + return err + } + + if m.PublicKey != nil { + if err := m.PublicKey.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this alpine v001 schema based on the context it is used +func (m *AlpineV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidatePackage(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePublicKey(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *AlpineV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error { + + if m.Package != nil { + + if err := m.Package.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package") + } + + return err + } + } + + return nil +} + +func (m *AlpineV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { + + if m.PublicKey != nil { + + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *AlpineV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AlpineV001Schema) UnmarshalBinary(b []byte) error { + var res AlpineV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// AlpineV001SchemaPackage Information about the package associated with the entry +// +// swagger:model AlpineV001SchemaPackage +type AlpineV001SchemaPackage struct { + + // Specifies the package inline within the document + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` + + // hash + Hash *AlpineV001SchemaPackageHash `json:"hash,omitempty"` + + // Values of the .PKGINFO key / value pairs + // Read Only: true + Pkginfo map[string]string `json:"pkginfo,omitempty"` +} + +// Validate validates this alpine v001 schema package +func (m *AlpineV001SchemaPackage) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AlpineV001SchemaPackage) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package" + "." + "hash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this alpine v001 schema package based on the context it is used +func (m *AlpineV001SchemaPackage) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePkginfo(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AlpineV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package" + "." 
+ "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package" + "." + "hash") + } + + return err + } + } + + return nil +} + +func (m *AlpineV001SchemaPackage) contextValidatePkginfo(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *AlpineV001SchemaPackage) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AlpineV001SchemaPackage) UnmarshalBinary(b []byte) error { + var res AlpineV001SchemaPackage + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// AlpineV001SchemaPackageHash Specifies the hash algorithm and value for the package +// +// swagger:model AlpineV001SchemaPackageHash +type AlpineV001SchemaPackageHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the package + // Required: true + Value *string `json:"value"` +} + +// Validate validates this alpine v001 schema package hash +func (m *AlpineV001SchemaPackageHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var alpineV001SchemaPackageHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + alpineV001SchemaPackageHashTypeAlgorithmPropEnum = append(alpineV001SchemaPackageHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // AlpineV001SchemaPackageHashAlgorithmSha256 captures enum value "sha256" + AlpineV001SchemaPackageHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *AlpineV001SchemaPackageHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, alpineV001SchemaPackageHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *AlpineV001SchemaPackageHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("package"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("package"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *AlpineV001SchemaPackageHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("package"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this alpine v001 schema package hash based on the context it is used +func (m *AlpineV001SchemaPackageHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// MarshalBinary interface implementation +func (m *AlpineV001SchemaPackageHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AlpineV001SchemaPackageHash) UnmarshalBinary(b []byte) error { + var res AlpineV001SchemaPackageHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// AlpineV001SchemaPublicKey The public key that can verify the package signature +// +// swagger:model AlpineV001SchemaPublicKey +type AlpineV001SchemaPublicKey struct { + + // Specifies the content of the public key inline within the document + // Required: true + // Format: byte + Content *strfmt.Base64 `json:"content"` +} + +// Validate validates this alpine v001 schema public key +func (m *AlpineV001SchemaPublicKey) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AlpineV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this alpine v001 schema public key based on context it is used +func (m *AlpineV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *AlpineV001SchemaPublicKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AlpineV001SchemaPublicKey) UnmarshalBinary(b []byte) error { + var res AlpineV001SchemaPublicKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go new file mode 100644 index 00000000000..804ddd11a96 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go @@ -0,0 +1,118 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ConsistencyProof consistency proof +// +// swagger:model ConsistencyProof +type ConsistencyProof struct { + + // hashes + // Required: true + Hashes []string `json:"hashes"` + + // The hash value stored at the root of the merkle tree at the time the proof was generated + // Required: true + // Pattern: ^[0-9a-fA-F]{64}$ + RootHash *string `json:"rootHash"` +} + +// Validate validates this consistency proof +func (m *ConsistencyProof) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHashes(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRootHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ConsistencyProof) validateHashes(formats strfmt.Registry) error { + + if err := validate.Required("hashes", "body", m.Hashes); err != nil { + return err + } + + for i := 0; i < len(m.Hashes); i++ { + + if err := validate.Pattern("hashes"+"."+strconv.Itoa(i), "body", m.Hashes[i], `^[0-9a-fA-F]{64}$`); err != nil { + return err + } + + } + + return nil +} + +func (m *ConsistencyProof) validateRootHash(formats strfmt.Registry) error { + + if err := validate.Required("rootHash", "body", m.RootHash); err != nil { + return err + } + + if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this consistency proof based on context it is used +func (m *ConsistencyProof) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ConsistencyProof) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ConsistencyProof) UnmarshalBinary(b []byte) error { + var res ConsistencyProof + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go new file mode 100644 index 00000000000..8de4083baf8 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Cose COSE object +// +// swagger:model cose +type Cose struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec CoseSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Cose) Kind() string { + return "cose" +} + +// SetKind sets the kind of this subtype +func (m *Cose) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Cose) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec CoseSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Cose + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Cose) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec CoseSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this cose +func (m *Cose) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Cose) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Cose) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this cose based on the context it is used +func (m *Cose) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Cose) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Cose) UnmarshalBinary(b []byte) error { + var res Cose + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go new file mode 100644 index 00000000000..8f901605038 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// CoseSchema COSE Schema +// +// # COSE for Rekord objects +// +// swagger:model coseSchema +type CoseSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go new file mode 100644 index 00000000000..9dafe29ce37 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go @@ -0,0 +1,546 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
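+//
+// Illustrative sketch, not generated code: Cose (like its dsse and
+// hashedrekord siblings below) is a polymorphic entry type discriminated by a
+// "kind" field. MarshalJSON injects `"kind":"cose"` via swag.ConcatJSON, and
+// UnmarshalJSON decodes the payload twice -- once for the typed fields and
+// once for the bare kind -- rejecting a mismatched kind with a 422 error:
+//
+//	entry := Cose{
+//		APIVersion: swag.String("0.0.1"),
+//		Spec:       map[string]any{}, // CoseSchema is an untyped `any`
+//	}
+//	raw, err := entry.MarshalJSON()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	var decoded Cose
+//	if err := decoded.UnmarshalJSON(raw); err != nil {
+//		log.Fatal(err) // fails when raw carries a different "kind"
+//	}
+//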
+// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CoseV001Schema cose v0.0.1 Schema +// +// # Schema for cose object +// +// swagger:model coseV001Schema +type CoseV001Schema struct { + + // data + Data *CoseV001SchemaData `json:"data,omitempty"` + + // The COSE Sign1 Message + // Format: byte + Message strfmt.Base64 `json:"message,omitempty"` + + // The public key that can verify the signature + // Required: true + // Format: byte + PublicKey *strfmt.Base64 `json:"publicKey"` +} + +// Validate validates this cose v001 schema +func (m *CoseV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateData(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CoseV001Schema) validateData(formats strfmt.Registry) error { + if swag.IsZero(m.Data) { // not required + return nil + } + + if m.Data != nil { + if err := m.Data.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data") + } + + return err + } + } + + return nil +} + +func (m *CoseV001Schema) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this cose v001 schema based on the context it is used +func (m *CoseV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateData(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *CoseV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { + + if m.Data != nil { + + if swag.IsZero(m.Data) { // not required + return nil + } + + if err := m.Data.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CoseV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CoseV001Schema) UnmarshalBinary(b []byte) error { + var res CoseV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// CoseV001SchemaData Information about the content associated with the entry +// +// swagger:model CoseV001SchemaData +type CoseV001SchemaData struct { + + // Specifies the additional authenticated data required to verify the signature + // Format: byte + Aad strfmt.Base64 `json:"aad,omitempty"` + + // envelope hash + EnvelopeHash *CoseV001SchemaDataEnvelopeHash `json:"envelopeHash,omitempty"` + + // payload hash + PayloadHash *CoseV001SchemaDataPayloadHash `json:"payloadHash,omitempty"` +} + +// Validate validates this cose v001 schema data +func (m *CoseV001SchemaData) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEnvelopeHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePayloadHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CoseV001SchemaData) validateEnvelopeHash(formats strfmt.Registry) error { + if swag.IsZero(m.EnvelopeHash) { // not required + return nil + } + + if m.EnvelopeHash != nil { + if err := m.EnvelopeHash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data" + "." + "envelopeHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data" + "." + "envelopeHash") + } + + return err + } + } + + return nil +} + +func (m *CoseV001SchemaData) validatePayloadHash(formats strfmt.Registry) error { + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if m.PayloadHash != nil { + if err := m.PayloadHash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data" + "." + "payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data" + "." + "payloadHash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this cose v001 schema data based on the context it is used +func (m *CoseV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePayloadHash(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *CoseV001SchemaData) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error { + + if m.EnvelopeHash != nil { + + if swag.IsZero(m.EnvelopeHash) { // not required + return nil + } + + if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data" + "." + "envelopeHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data" + "." + "envelopeHash") + } + + return err + } + } + + return nil +} + +func (m *CoseV001SchemaData) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { + + if m.PayloadHash != nil { + + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data" + "." + "payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data" + "." + "payloadHash") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CoseV001SchemaData) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CoseV001SchemaData) UnmarshalBinary(b []byte) error { + var res CoseV001SchemaData + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// CoseV001SchemaDataEnvelopeHash Specifies the hash algorithm and value for the COSE envelope +// +// swagger:model CoseV001SchemaDataEnvelopeHash +type CoseV001SchemaDataEnvelopeHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the envelope + // Required: true + Value *string `json:"value"` +} + +// Validate validates this cose v001 schema data envelope hash +func (m *CoseV001SchemaDataEnvelopeHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum = append(coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // CoseV001SchemaDataEnvelopeHashAlgorithmSha256 captures enum value "sha256" + CoseV001SchemaDataEnvelopeHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *CoseV001SchemaDataEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *CoseV001SchemaDataEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("data"+"."+"envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *CoseV001SchemaDataEnvelopeHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"envelopeHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this cose v001 schema data envelope hash based on the context it is used +func (m *CoseV001SchemaDataEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *CoseV001SchemaDataEnvelopeHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CoseV001SchemaDataEnvelopeHash) UnmarshalBinary(b []byte) error { + var res CoseV001SchemaDataEnvelopeHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// CoseV001SchemaDataPayloadHash Specifies the hash algorithm and value for the content +// +// swagger:model CoseV001SchemaDataPayloadHash +type CoseV001SchemaDataPayloadHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the content + // Required: true + Value *string `json:"value"` +} + +// Validate validates this cose v001 schema data payload hash +func (m *CoseV001SchemaDataPayloadHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum = append(coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // CoseV001SchemaDataPayloadHashAlgorithmSha256 captures enum value "sha256" + CoseV001SchemaDataPayloadHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *CoseV001SchemaDataPayloadHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *CoseV001SchemaDataPayloadHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("data"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *CoseV001SchemaDataPayloadHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this cose v001 schema data payload hash based on the context it is used +func (m *CoseV001SchemaDataPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *CoseV001SchemaDataPayloadHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CoseV001SchemaDataPayloadHash) UnmarshalBinary(b []byte) error { + var res CoseV001SchemaDataPayloadHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go new file mode 100644 index 00000000000..dde562054c3 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
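+//
+// Illustrative sketch, not generated code: Validate on the models above
+// returns an errors.CompositeError aggregating every field failure, and the
+// nested helpers re-root inner errors with ValidateName so each one carries
+// the full JSON path of the offending field:
+//
+//	schema := &CoseV001Schema{} // missing the required publicKey
+//	err := schema.Validate(strfmt.Default)
+//	var ce *errors.CompositeError
+//	if stderrors.As(err, &ce) {
+//		for _, e := range ce.Errors {
+//			fmt.Println(e) // e.g. "publicKey in body is required"
+//		}
+//	}
+//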
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// DSSE DSSE envelope +// +// swagger:model dsse +type DSSE struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec DSSESchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *DSSE) Kind() string { + return "dsse" +} + +// SetKind sets the kind of this subtype +func (m *DSSE) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *DSSE) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec DSSESchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result DSSE + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m DSSE) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec DSSESchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this dsse +func (m *DSSE) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *DSSE) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *DSSE) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this dsse based on the context it is used +func (m *DSSE) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *DSSE) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DSSE) UnmarshalBinary(b []byte) error { + var res DSSE + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go new file mode 100644 index 00000000000..0dc5c87ed37 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DSSESchema DSSE Schema +// +// log entry schema for dsse envelopes +// +// swagger:model dsseSchema +type DSSESchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go new file mode 100644 index 00000000000..8cad568daf8 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go @@ -0,0 +1,718 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
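+//
+// Illustrative sketch, not generated code: the hash submodels in this file
+// (like their cose counterparts above) validate the algorithm field against a
+// JSON enum parsed once in init(), and expose each allowed value as an
+// exported constant so callers never hard-code the literal. The digest value
+// below is a hypothetical placeholder:
+//
+//	h := &DSSEV001SchemaEnvelopeHash{
+//		Algorithm: swag.String(DSSEV001SchemaEnvelopeHashAlgorithmSha256),
+//		Value:     swag.String(strings.Repeat("00", 32)),
+//	}
+//	if err := h.Validate(strfmt.Default); err != nil {
+//		log.Fatal(err) // any algorithm other than "sha256" fails EnumCase
+//	}
+//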
+// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// DSSEV001Schema DSSE v0.0.1 Schema +// +// # Schema for DSSE envelopes +// +// swagger:model dsseV001Schema +type DSSEV001Schema struct { + + // envelope hash + EnvelopeHash *DSSEV001SchemaEnvelopeHash `json:"envelopeHash,omitempty"` + + // payload hash + PayloadHash *DSSEV001SchemaPayloadHash `json:"payloadHash,omitempty"` + + // proposed content + ProposedContent *DSSEV001SchemaProposedContent `json:"proposedContent,omitempty"` + + // extracted collection of all signatures of the envelope's payload; elements will be sorted by lexicographical order of the base64 encoded signature strings + // Read Only: true + // Min Items: 1 + Signatures []*DSSEV001SchemaSignaturesItems0 `json:"signatures"` +} + +// Validate validates this dsse v001 schema +func (m *DSSEV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEnvelopeHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePayloadHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProposedContent(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSignatures(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *DSSEV001Schema) validateEnvelopeHash(formats strfmt.Registry) error { + if swag.IsZero(m.EnvelopeHash) { // not required + return nil + } + + if m.EnvelopeHash != nil { + if err := m.EnvelopeHash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("envelopeHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("envelopeHash") + } + + return err + } + } + + return nil +} + +func (m *DSSEV001Schema) validatePayloadHash(formats strfmt.Registry) error { + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if m.PayloadHash != nil { + if err := m.PayloadHash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("payloadHash") + } + + return err + } + } + + return nil +} + +func (m *DSSEV001Schema) validateProposedContent(formats strfmt.Registry) error { + if swag.IsZero(m.ProposedContent) { // not required + return nil + } + + if m.ProposedContent != nil { + if err := m.ProposedContent.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("proposedContent") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("proposedContent") + } + + return err + } + } + + return nil +} + +func (m *DSSEV001Schema) validateSignatures(formats strfmt.Registry) error { + if swag.IsZero(m.Signatures) { // not required + return nil + } + + iSignaturesSize := int64(len(m.Signatures)) + + if err := validate.MinItems("signatures", "body", iSignaturesSize, 1); err != nil { + return err + } + + for i := 0; i < len(m.Signatures); i++ { + if swag.IsZero(m.Signatures[i]) { // not 
required + continue + } + + if m.Signatures[i] != nil { + if err := m.Signatures[i].Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signatures" + "." + strconv.Itoa(i)) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signatures" + "." + strconv.Itoa(i)) + } + + return err + } + } + + } + + return nil +} + +// ContextValidate validate this dsse v001 schema based on the context it is used +func (m *DSSEV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePayloadHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateProposedContent(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateSignatures(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *DSSEV001Schema) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error { + + if m.EnvelopeHash != nil { + + if swag.IsZero(m.EnvelopeHash) { // not required + return nil + } + + if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("envelopeHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("envelopeHash") + } + + return err + } + } + + return nil +} + +func (m *DSSEV001Schema) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { + + if m.PayloadHash != nil { + + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("payloadHash") + } + + return err + } + } + + return nil +} + +func (m *DSSEV001Schema) contextValidateProposedContent(ctx context.Context, formats strfmt.Registry) error { + + if m.ProposedContent != nil { + + if swag.IsZero(m.ProposedContent) { // not required + return nil + } + + if err := m.ProposedContent.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("proposedContent") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("proposedContent") + } + + return err + } + } + + return nil +} + +func (m *DSSEV001Schema) contextValidateSignatures(ctx context.Context, formats strfmt.Registry) error { + + if err := validate.ReadOnly(ctx, "signatures", "body", m.Signatures); err != nil { + return err + } + + for i := 0; i < len(m.Signatures); i++ { + + if m.Signatures[i] != nil { + + if swag.IsZero(m.Signatures[i]) { // not required + return nil + } + + if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signatures" + "." + strconv.Itoa(i)) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signatures" + "." 
+ strconv.Itoa(i)) + } + + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *DSSEV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DSSEV001Schema) UnmarshalBinary(b []byte) error { + var res DSSEV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// DSSEV001SchemaEnvelopeHash Specifies the hash algorithm and value encompassing the entire envelope sent to Rekor +// +// swagger:model DSSEV001SchemaEnvelopeHash +type DSSEV001SchemaEnvelopeHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The value of the computed digest over the entire envelope + // Required: true + Value *string `json:"value"` +} + +// Validate validates this DSSE v001 schema envelope hash +func (m *DSSEV001SchemaEnvelopeHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum = append(dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // DSSEV001SchemaEnvelopeHashAlgorithmSha256 captures enum value "sha256" + DSSEV001SchemaEnvelopeHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *DSSEV001SchemaEnvelopeHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("envelopeHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this DSSE v001 schema envelope hash based on the context it is used +func (m *DSSEV001SchemaEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// MarshalBinary interface implementation +func (m *DSSEV001SchemaEnvelopeHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DSSEV001SchemaEnvelopeHash) UnmarshalBinary(b []byte) error { + var res DSSEV001SchemaEnvelopeHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// DSSEV001SchemaPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope +// +// swagger:model DSSEV001SchemaPayloadHash +type DSSEV001SchemaPayloadHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The value of the computed digest over the payload within the envelope + // Required: true + Value *string `json:"value"` +} + +// Validate validates this DSSE v001 schema payload hash +func (m *DSSEV001SchemaPayloadHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var dsseV001SchemaPayloadHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + dsseV001SchemaPayloadHashTypeAlgorithmPropEnum = append(dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // DSSEV001SchemaPayloadHashAlgorithmSha256 captures enum value "sha256" + DSSEV001SchemaPayloadHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *DSSEV001SchemaPayloadHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *DSSEV001SchemaPayloadHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *DSSEV001SchemaPayloadHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("payloadHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this DSSE v001 schema payload hash based on the context it is used +func (m *DSSEV001SchemaPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// MarshalBinary interface implementation +func (m *DSSEV001SchemaPayloadHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DSSEV001SchemaPayloadHash) UnmarshalBinary(b []byte) error { + var res DSSEV001SchemaPayloadHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// DSSEV001SchemaProposedContent DSSE v001 schema proposed content +// +// swagger:model DSSEV001SchemaProposedContent +type DSSEV001SchemaProposedContent struct { + + // DSSE envelope specified as a stringified JSON object + // Required: true + Envelope *string `json:"envelope"` + + // collection of all verification material (e.g. public keys or certificates) used to verify signatures over envelope's payload, specified as base64-encoded strings + // Required: true + // Min Items: 1 + Verifiers []strfmt.Base64 `json:"verifiers"` +} + +// Validate validates this DSSE v001 schema proposed content +func (m *DSSEV001SchemaProposedContent) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEnvelope(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVerifiers(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *DSSEV001SchemaProposedContent) validateEnvelope(formats strfmt.Registry) error { + + if err := validate.Required("proposedContent"+"."+"envelope", "body", m.Envelope); err != nil { + return err + } + + return nil +} + +func (m *DSSEV001SchemaProposedContent) validateVerifiers(formats strfmt.Registry) error { + + if err := validate.Required("proposedContent"+"."+"verifiers", "body", m.Verifiers); err != nil { + return err + } + + iVerifiersSize := int64(len(m.Verifiers)) + + if err := validate.MinItems("proposedContent"+"."+"verifiers", "body", iVerifiersSize, 1); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this DSSE v001 schema proposed content based on context it is used +func (m *DSSEV001SchemaProposedContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *DSSEV001SchemaProposedContent) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DSSEV001SchemaProposedContent) UnmarshalBinary(b []byte) error { + var res DSSEV001SchemaProposedContent + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// DSSEV001SchemaSignaturesItems0 a signature of the envelope's payload along with the verification material for the signature +// +// swagger:model DSSEV001SchemaSignaturesItems0 +type DSSEV001SchemaSignaturesItems0 struct { + + // base64 encoded signature of the payload + // Required: true + // Pattern: ^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$ + Signature *string `json:"signature"` + + // verification material that was used to verify the corresponding signature, specified as a base64 encoded string + // Required: true + // Format: byte + Verifier *strfmt.Base64 `json:"verifier"` +} + +// Validate validates this DSSE v001 schema signatures items0 +func (m *DSSEV001SchemaSignaturesItems0) Validate(formats strfmt.Registry) error { + var res []error + + if err := 
m.validateSignature(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVerifier(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *DSSEV001SchemaSignaturesItems0) validateSignature(formats strfmt.Registry) error { + + if err := validate.Required("signature", "body", m.Signature); err != nil { + return err + } + + if err := validate.Pattern("signature", "body", *m.Signature, `^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$`); err != nil { + return err + } + + return nil +} + +func (m *DSSEV001SchemaSignaturesItems0) validateVerifier(formats strfmt.Registry) error { + + if err := validate.Required("verifier", "body", m.Verifier); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this DSSE v001 schema signatures items0 based on context it is used +func (m *DSSEV001SchemaSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *DSSEV001SchemaSignaturesItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DSSEV001SchemaSignaturesItems0) UnmarshalBinary(b []byte) error { + var res DSSEV001SchemaSignaturesItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go new file mode 100644 index 00000000000..ac14f2026e4 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go @@ -0,0 +1,69 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
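+//
+// Illustrative sketch, not generated code: a proposed DSSE entry supplies the
+// envelope as stringified JSON plus at least one base64-encoded verifier,
+// while the signatures collection above is read-only and populated by the
+// server. The envelope value here is a hypothetical placeholder:
+//
+//	pc := &DSSEV001SchemaProposedContent{
+//		Envelope:  swag.String(`{"payloadType":"application/vnd.in-toto+json"}`),
+//		Verifiers: []strfmt.Base64{strfmt.Base64("public key bytes")},
+//	}
+//	if err := pc.Validate(strfmt.Default); err != nil {
+//		log.Fatal(err) // Required and MinItems(1) checks apply
+//	}
+//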
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Error error +// +// swagger:model Error +type Error struct { + + // code + Code int64 `json:"code,omitempty"` + + // message + Message string `json:"message,omitempty"` +} + +// Validate validates this error +func (m *Error) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this error based on context it is used +func (m *Error) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Error) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Error) UnmarshalBinary(b []byte) error { + var res Error + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go new file mode 100644 index 00000000000..b3e1f8a3bda --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Hashedrekord Hashed Rekord object +// +// swagger:model hashedrekord +type Hashedrekord struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec HashedrekordSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Hashedrekord) Kind() string { + return "hashedrekord" +} + +// SetKind sets the kind of this subtype +func (m *Hashedrekord) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Hashedrekord) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec HashedrekordSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Hashedrekord + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Hashedrekord) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec HashedrekordSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this hashedrekord +func (m *Hashedrekord) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Hashedrekord) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Hashedrekord) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this hashedrekord based on the context it is used +func (m *Hashedrekord) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Hashedrekord) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Hashedrekord) UnmarshalBinary(b []byte) error { + var res Hashedrekord + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go new file mode 100644 index 00000000000..67fc8ababad --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// HashedrekordSchema Hashedrekord Schema +// +// # Schema for Hashedrekord objects +// +// swagger:model hashedrekordSchema +type HashedrekordSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go new file mode 100644 index 00000000000..866842e5690 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go @@ -0,0 +1,552 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// HashedrekordV001Schema Hashed Rekor v0.0.1 Schema +// +// # Schema for Hashed Rekord object +// +// swagger:model hashedrekordV001Schema +type HashedrekordV001Schema struct { + + // data + // Required: true + Data *HashedrekordV001SchemaData `json:"data"` + + // signature + // Required: true + Signature *HashedrekordV001SchemaSignature `json:"signature"` +} + +// Validate validates this hashedrekord v001 schema +func (m *HashedrekordV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateData(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSignature(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HashedrekordV001Schema) validateData(formats strfmt.Registry) error { + + if err := validate.Required("data", "body", m.Data); err != nil { + return err + } + + if m.Data != nil { + if err := m.Data.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data") + } + + return err + } + } + + return nil +} + +func (m *HashedrekordV001Schema) validateSignature(formats strfmt.Registry) error { + + if err := validate.Required("signature", "body", m.Signature); err != nil { + return err + } + + if m.Signature != nil { + if err := m.Signature.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this hashedrekord v001 schema based on the context it is used +func (m *HashedrekordV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateData(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateSignature(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *HashedrekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { + + if m.Data != nil { + + if err := m.Data.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data") + } + + return err + } + } + + return nil +} + +func (m *HashedrekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { + + if m.Signature != nil { + + if err := m.Signature.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HashedrekordV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HashedrekordV001Schema) UnmarshalBinary(b []byte) error { + var res HashedrekordV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HashedrekordV001SchemaData Information about the content associated with the entry +// +// swagger:model HashedrekordV001SchemaData +type HashedrekordV001SchemaData struct { + + // hash + Hash *HashedrekordV001SchemaDataHash `json:"hash,omitempty"` +} + +// Validate validates this hashedrekord v001 schema data +func (m *HashedrekordV001SchemaData) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HashedrekordV001SchemaData) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data" + "." + "hash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this hashedrekord v001 schema data based on the context it is used +func (m *HashedrekordV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HashedrekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("data" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("data" + "." 
+ "hash") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HashedrekordV001SchemaData) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HashedrekordV001SchemaData) UnmarshalBinary(b []byte) error { + var res HashedrekordV001SchemaData + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HashedrekordV001SchemaDataHash Specifies the hash algorithm and value for the content +// +// swagger:model HashedrekordV001SchemaDataHash +type HashedrekordV001SchemaDataHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256","sha384","sha512"] + Algorithm *string `json:"algorithm"` + + // The hash value for the content, as represented by a lower case hexadecimal string + // Required: true + Value *string `json:"value"` +} + +// Validate validates this hashedrekord v001 schema data hash +func (m *HashedrekordV001SchemaDataHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256","sha384","sha512"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum = append(hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // HashedrekordV001SchemaDataHashAlgorithmSha256 captures enum value "sha256" + HashedrekordV001SchemaDataHashAlgorithmSha256 string = "sha256" + + // HashedrekordV001SchemaDataHashAlgorithmSha384 captures enum value "sha384" + HashedrekordV001SchemaDataHashAlgorithmSha384 string = "sha384" + + // HashedrekordV001SchemaDataHashAlgorithmSha512 captures enum value "sha512" + HashedrekordV001SchemaDataHashAlgorithmSha512 string = "sha512" +) + +// prop value enum +func (m *HashedrekordV001SchemaDataHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *HashedrekordV001SchemaDataHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("data"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *HashedrekordV001SchemaDataHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this hashedrekord v001 schema data hash based on context it is used +func (m *HashedrekordV001SchemaDataHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *HashedrekordV001SchemaDataHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface 
implementation +func (m *HashedrekordV001SchemaDataHash) UnmarshalBinary(b []byte) error { + var res HashedrekordV001SchemaDataHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HashedrekordV001SchemaSignature Information about the detached signature associated with the entry +// +// swagger:model HashedrekordV001SchemaSignature +type HashedrekordV001SchemaSignature struct { + + // Specifies the content of the signature inline within the document + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` + + // public key + PublicKey *HashedrekordV001SchemaSignaturePublicKey `json:"publicKey,omitempty"` +} + +// Validate validates this hashedrekord v001 schema signature +func (m *HashedrekordV001SchemaSignature) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HashedrekordV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error { + if swag.IsZero(m.PublicKey) { // not required + return nil + } + + if m.PublicKey != nil { + if err := m.PublicKey.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature" + "." + "publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature" + "." + "publicKey") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this hashedrekord v001 schema signature based on the context it is used +func (m *HashedrekordV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidatePublicKey(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HashedrekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { + + if m.PublicKey != nil { + + if swag.IsZero(m.PublicKey) { // not required + return nil + } + + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature" + "." + "publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature" + "." 
+ "publicKey") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HashedrekordV001SchemaSignature) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HashedrekordV001SchemaSignature) UnmarshalBinary(b []byte) error { + var res HashedrekordV001SchemaSignature + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HashedrekordV001SchemaSignaturePublicKey The public key that can verify the signature; this can also be an X509 code signing certificate that contains the raw public key information +// +// swagger:model HashedrekordV001SchemaSignaturePublicKey +type HashedrekordV001SchemaSignaturePublicKey struct { + + // Specifies the content of the public key or code signing certificate inline within the document + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` +} + +// Validate validates this hashedrekord v001 schema signature public key +func (m *HashedrekordV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this hashedrekord v001 schema signature public key based on context it is used +func (m *HashedrekordV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *HashedrekordV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HashedrekordV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error { + var res HashedrekordV001SchemaSignaturePublicKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go new file mode 100644 index 00000000000..d19b8bc8c9d --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Helm Helm chart +// +// swagger:model helm +type Helm struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec HelmSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Helm) Kind() string { + return "helm" +} + +// SetKind sets the kind of this subtype +func (m *Helm) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Helm) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec HelmSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Helm + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Helm) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec HelmSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this helm +func (m *Helm) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Helm) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Helm) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this helm based on the context it is used +func (m *Helm) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Helm) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Helm) UnmarshalBinary(b []byte) error { + var res Helm + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go new file mode 100644 index 00000000000..305a9e16fde --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// HelmSchema Helm Schema +// +// # Schema for Helm objects +// +// swagger:model helmSchema +type HelmSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go new file mode 100644 index 00000000000..1d52e1e4f56 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go @@ -0,0 +1,703 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
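For reviewers: the Helm type above is a kind-discriminated subtype, so its MarshalJSON injects "kind":"helm" and its UnmarshalJSON rejects any other kind with a 422 error. A small round-trip sketch (not part of the vendored diff; the spec value is a placeholder, a real one follows helmV001Schema):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/swag"

	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	h := models.Helm{
		APIVersion: swag.String("0.0.1"),
		// HelmSchema is `any`; this placeholder stands in for a helmV001Schema body.
		Spec: map[string]any{"placeholder": true},
	}
	// MarshalJSON concatenates the subtype fields with the "kind" discriminator.
	raw, err := json.Marshal(h)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw)) // {"apiVersion":"0.0.1","spec":{...},"kind":"helm"}

	// UnmarshalJSON checks the discriminator and fails on anything but "helm".
	var back models.Helm
	if err := json.Unmarshal(raw, &back); err != nil {
		panic(err)
	}
	fmt.Println(*back.APIVersion) // 0.0.1
}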
+// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// HelmV001Schema Helm v0.0.1 Schema +// +// # Schema for Helm object +// +// swagger:model helmV001Schema +type HelmV001Schema struct { + + // chart + // Required: true + Chart *HelmV001SchemaChart `json:"chart"` + + // public key + // Required: true + PublicKey *HelmV001SchemaPublicKey `json:"publicKey"` +} + +// Validate validates this helm v001 schema +func (m *HelmV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateChart(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HelmV001Schema) validateChart(formats strfmt.Registry) error { + + if err := validate.Required("chart", "body", m.Chart); err != nil { + return err + } + + if m.Chart != nil { + if err := m.Chart.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart") + } + + return err + } + } + + return nil +} + +func (m *HelmV001Schema) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { + return err + } + + if m.PublicKey != nil { + if err := m.PublicKey.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this helm v001 schema based on the context it is used +func (m *HelmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateChart(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePublicKey(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *HelmV001Schema) contextValidateChart(ctx context.Context, formats strfmt.Registry) error { + + if m.Chart != nil { + + if err := m.Chart.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart") + } + + return err + } + } + + return nil +} + +func (m *HelmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { + + if m.PublicKey != nil { + + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HelmV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HelmV001Schema) UnmarshalBinary(b []byte) error { + var res HelmV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HelmV001SchemaChart Information about the Helm chart associated with the entry +// +// swagger:model HelmV001SchemaChart +type HelmV001SchemaChart struct { + + // hash + Hash *HelmV001SchemaChartHash `json:"hash,omitempty"` + + // provenance + // Required: true + Provenance *HelmV001SchemaChartProvenance `json:"provenance"` +} + +// Validate validates this helm v001 schema chart +func (m *HelmV001SchemaChart) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProvenance(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HelmV001SchemaChart) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart" + "." + "hash") + } + + return err + } + } + + return nil +} + +func (m *HelmV001SchemaChart) validateProvenance(formats strfmt.Registry) error { + + if err := validate.Required("chart"+"."+"provenance", "body", m.Provenance); err != nil { + return err + } + + if m.Provenance != nil { + if err := m.Provenance.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart" + "." + "provenance") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart" + "." 
+ "provenance") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this helm v001 schema chart based on the context it is used +func (m *HelmV001SchemaChart) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateProvenance(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HelmV001SchemaChart) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart" + "." + "hash") + } + + return err + } + } + + return nil +} + +func (m *HelmV001SchemaChart) contextValidateProvenance(ctx context.Context, formats strfmt.Registry) error { + + if m.Provenance != nil { + + if err := m.Provenance.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart" + "." + "provenance") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart" + "." + "provenance") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HelmV001SchemaChart) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HelmV001SchemaChart) UnmarshalBinary(b []byte) error { + var res HelmV001SchemaChart + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HelmV001SchemaChartHash Specifies the hash algorithm and value for the chart +// +// swagger:model HelmV001SchemaChartHash +type HelmV001SchemaChartHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the chart + // Required: true + Value *string `json:"value"` +} + +// Validate validates this helm v001 schema chart hash +func (m *HelmV001SchemaChartHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var helmV001SchemaChartHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + helmV001SchemaChartHashTypeAlgorithmPropEnum = append(helmV001SchemaChartHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // HelmV001SchemaChartHashAlgorithmSha256 captures enum value "sha256" + HelmV001SchemaChartHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *HelmV001SchemaChartHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, helmV001SchemaChartHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *HelmV001SchemaChartHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("chart"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("chart"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *HelmV001SchemaChartHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("chart"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this helm v001 schema chart hash based on the context it is used +func (m *HelmV001SchemaChartHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *HelmV001SchemaChartHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HelmV001SchemaChartHash) UnmarshalBinary(b []byte) error { + var res HelmV001SchemaChartHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HelmV001SchemaChartProvenance The provenance entry associated with the signed Helm Chart +// +// swagger:model HelmV001SchemaChartProvenance +type HelmV001SchemaChartProvenance struct { + + // Specifies the content of the provenance file inline within the document + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` + + // signature + Signature *HelmV001SchemaChartProvenanceSignature `json:"signature,omitempty"` +} + +// Validate validates this helm v001 schema chart provenance +func (m *HelmV001SchemaChartProvenance) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateSignature(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HelmV001SchemaChartProvenance) validateSignature(formats strfmt.Registry) error { + if swag.IsZero(m.Signature) { // not required + return nil + } + + if m.Signature != nil { + if err := m.Signature.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart" + "." + "provenance" + "." + "signature") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart" + "." + "provenance" + "." 
+ "signature") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this helm v001 schema chart provenance based on the context it is used +func (m *HelmV001SchemaChartProvenance) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateSignature(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HelmV001SchemaChartProvenance) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { + + if m.Signature != nil { + + if swag.IsZero(m.Signature) { // not required + return nil + } + + if err := m.Signature.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("chart" + "." + "provenance" + "." + "signature") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("chart" + "." + "provenance" + "." + "signature") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HelmV001SchemaChartProvenance) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HelmV001SchemaChartProvenance) UnmarshalBinary(b []byte) error { + var res HelmV001SchemaChartProvenance + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HelmV001SchemaChartProvenanceSignature Information about the included signature in the provenance file +// +// swagger:model HelmV001SchemaChartProvenanceSignature +type HelmV001SchemaChartProvenanceSignature struct { + + // Specifies the signature embedded within the provenance file + // Required: true + // Read Only: true + // Format: byte + Content strfmt.Base64 `json:"content"` +} + +// Validate validates this helm v001 schema chart provenance signature +func (m *HelmV001SchemaChartProvenanceSignature) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HelmV001SchemaChartProvenanceSignature) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("chart"+"."+"provenance"+"."+"signature"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this helm v001 schema chart provenance signature based on the context it is used +func (m *HelmV001SchemaChartProvenanceSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateContent(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *HelmV001SchemaChartProvenanceSignature) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { + + if err := validate.ReadOnly(ctx, "chart"+"."+"provenance"+"."+"signature"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HelmV001SchemaChartProvenanceSignature) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HelmV001SchemaChartProvenanceSignature) UnmarshalBinary(b []byte) error { + var res HelmV001SchemaChartProvenanceSignature + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// HelmV001SchemaPublicKey The public key that can verify the package signature +// +// swagger:model HelmV001SchemaPublicKey +type HelmV001SchemaPublicKey struct { + + // Specifies the content of the public key inline within the document + // Required: true + // Format: byte + Content *strfmt.Base64 `json:"content"` +} + +// Validate validates this helm v001 schema public key +func (m *HelmV001SchemaPublicKey) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HelmV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this helm v001 schema public key based on context it is used +func (m *HelmV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *HelmV001SchemaPublicKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HelmV001SchemaPublicKey) UnmarshalBinary(b []byte) error { + var res HelmV001SchemaPublicKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go new file mode 100644 index 00000000000..c555eb2da64 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
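For reviewers: a minimal sketch (not part of the vendored diff) of building the HelmV001Schema defined above. chart.provenance and publicKey are the two Required branches; the byte contents are placeholders for a real provenance file and PGP key.

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"

	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	// Placeholder bytes; a real entry embeds the provenance file and key material.
	pubKey := strfmt.Base64("placeholder public key")
	entry := &models.HelmV001Schema{
		Chart: &models.HelmV001SchemaChart{
			// hash is optional; provenance is Required.
			Provenance: &models.HelmV001SchemaChartProvenance{
				Content: strfmt.Base64("placeholder provenance"),
			},
		},
		PublicKey: &models.HelmV001SchemaPublicKey{Content: &pubKey},
	}
	if err := entry.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid helm entry:", err)
		return
	}
	fmt.Println("helm entry is structurally valid")
}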
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// InactiveShardLogInfo inactive shard log info +// +// swagger:model InactiveShardLogInfo +type InactiveShardLogInfo struct { + + // The current hash value stored at the root of the merkle tree + // Required: true + // Pattern: ^[0-9a-fA-F]{64}$ + RootHash *string `json:"rootHash"` + + // The current signed tree head + // Required: true + SignedTreeHead *string `json:"signedTreeHead"` + + // The current treeID + // Required: true + // Pattern: ^[0-9]+$ + TreeID *string `json:"treeID"` + + // The current number of nodes in the merkle tree + // Required: true + // Minimum: 1 + TreeSize *int64 `json:"treeSize"` +} + +// Validate validates this inactive shard log info +func (m *InactiveShardLogInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateRootHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSignedTreeHead(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTreeID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTreeSize(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *InactiveShardLogInfo) validateRootHash(formats strfmt.Registry) error { + + if err := validate.Required("rootHash", "body", m.RootHash); err != nil { + return err + } + + if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { + return err + } + + return nil +} + +func (m *InactiveShardLogInfo) validateSignedTreeHead(formats strfmt.Registry) error { + + if err := validate.Required("signedTreeHead", "body", m.SignedTreeHead); err != nil { + return err + } + + return nil +} + +func (m *InactiveShardLogInfo) validateTreeID(formats strfmt.Registry) error { + + if err := validate.Required("treeID", "body", m.TreeID); err != nil { + return err + } + + if err := validate.Pattern("treeID", "body", *m.TreeID, `^[0-9]+$`); err != nil { + return err + } + + return nil +} + +func (m *InactiveShardLogInfo) validateTreeSize(formats strfmt.Registry) error { + + if err := validate.Required("treeSize", "body", m.TreeSize); err != nil { + return err + } + + if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this inactive shard log info based on context it is used +func (m *InactiveShardLogInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *InactiveShardLogInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *InactiveShardLogInfo) UnmarshalBinary(b []byte) error { + var res InactiveShardLogInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go new file mode 100644 index 00000000000..86f0d7b94e2 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go @@ -0,0 +1,179 @@ +// Code generated by 
go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// InclusionProof inclusion proof +// +// swagger:model InclusionProof +type InclusionProof struct { + + // The checkpoint (signed tree head) that the inclusion proof is based on + // Required: true + Checkpoint *string `json:"checkpoint"` + + // A list of hashes required to compute the inclusion proof, sorted in order from leaf to root + // Required: true + Hashes []string `json:"hashes"` + + // The index of the entry in the transparency log + // Required: true + // Minimum: 0 + LogIndex *int64 `json:"logIndex"` + + // The hash value stored at the root of the merkle tree at the time the proof was generated + // Required: true + // Pattern: ^[0-9a-fA-F]{64}$ + RootHash *string `json:"rootHash"` + + // The size of the merkle tree at the time the inclusion proof was generated + // Required: true + // Minimum: 1 + TreeSize *int64 `json:"treeSize"` +} + +// Validate validates this inclusion proof +func (m *InclusionProof) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCheckpoint(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHashes(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLogIndex(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRootHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTreeSize(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *InclusionProof) validateCheckpoint(formats strfmt.Registry) error { + + if err := validate.Required("checkpoint", "body", m.Checkpoint); err != nil { + return err + } + + return nil +} + +func (m *InclusionProof) validateHashes(formats strfmt.Registry) error { + + if err := validate.Required("hashes", "body", m.Hashes); err != nil { + return err + } + + for i := 0; i < len(m.Hashes); i++ { + + if err := validate.Pattern("hashes"+"."+strconv.Itoa(i), "body", m.Hashes[i], `^[0-9a-fA-F]{64}$`); err != nil { + return err + } + + } + + return nil +} + +func (m *InclusionProof) validateLogIndex(formats strfmt.Registry) error { + + if err := validate.Required("logIndex", "body", m.LogIndex); err != nil { + return err + } + + if err := validate.MinimumInt("logIndex", "body", *m.LogIndex, 0, false); err != nil { + return err + } + + return nil +} + +func (m *InclusionProof) validateRootHash(formats strfmt.Registry) error { + + if err := validate.Required("rootHash", "body", m.RootHash); err != nil { + return err + } + + if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { + return err + } + + return nil +} + +func (m *InclusionProof) validateTreeSize(formats strfmt.Registry) error { + + if err := validate.Required("treeSize", "body", m.TreeSize); err != nil { + return err + } + + if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this inclusion proof based on context it is used +func (m *InclusionProof) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *InclusionProof) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *InclusionProof) UnmarshalBinary(b []byte) error { + var res InclusionProof + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go new file mode 100644 index 00000000000..4f208de1d55 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
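For reviewers: the InclusionProof model above (like InactiveShardLogInfo before it) leans on pattern and minimum checks rather than nested types. A sketch (not part of the vendored diff) of a proof that fails validation, with hypothetical values chosen to trip the ^[0-9a-fA-F]{64}$ patterns:

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"

	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	proof := &models.InclusionProof{
		Checkpoint: swag.String("placeholder signed tree head"),
		Hashes:     []string{"df0c"},                // too short for ^[0-9a-fA-F]{64}$
		LogIndex:   swag.Int64(42),                  // Minimum: 0, so this passes
		RootHash:   swag.String("not-a-hex-digest"), // fails the same pattern
		TreeSize:   swag.Int64(100),                 // Minimum: 1, so this passes
	}
	// Both pattern violations are collected into a CompositeValidationError.
	if err := proof.Validate(strfmt.Default); err != nil {
		fmt.Println("rejected:", err)
	}
}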
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Intoto Intoto object +// +// swagger:model intoto +type Intoto struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec IntotoSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Intoto) Kind() string { + return "intoto" +} + +// SetKind sets the kind of this subtype +func (m *Intoto) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Intoto) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec IntotoSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Intoto + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Intoto) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec IntotoSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this intoto +func (m *Intoto) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Intoto) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Intoto) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this intoto based on the context it is used +func (m *Intoto) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Intoto) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Intoto) UnmarshalBinary(b []byte) error { + var res Intoto + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go new file mode 100644 index 00000000000..142f0a19429 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IntotoSchema Intoto Schema +// +// # Intoto for Rekord objects +// +// swagger:model intotoSchema +type IntotoSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go new file mode 100644 index 00000000000..d3a6ca42f4c --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go @@ -0,0 +1,539 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
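For reviewers: Intoto uses the same kind-discriminator machinery as Helm above. A sketch (not part of the vendored diff) of the rejection path, feeding a payload whose kind deliberately does not match:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	// kind "rekord" is an arbitrary mismatch here; this type only accepts "intoto".
	payload := []byte(`{"kind":"rekord","apiVersion":"0.0.1","spec":{}}`)
	var entry models.Intoto
	if err := json.Unmarshal(payload, &entry); err != nil {
		fmt.Println("rejected:", err) // invalid kind value: "rekord"
	}
}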
+// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// IntotoV001Schema intoto v0.0.1 Schema +// +// # Schema for intoto object +// +// swagger:model intotoV001Schema +type IntotoV001Schema struct { + + // content + // Required: true + Content *IntotoV001SchemaContent `json:"content"` + + // The public key that can verify the signature + // Required: true + // Format: byte + PublicKey *strfmt.Base64 `json:"publicKey"` +} + +// Validate validates this intoto v001 schema +func (m *IntotoV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IntotoV001Schema) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("content", "body", m.Content); err != nil { + return err + } + + if m.Content != nil { + if err := m.Content.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content") + } + + return err + } + } + + return nil +} + +func (m *IntotoV001Schema) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this intoto v001 schema based on the context it is used +func (m *IntotoV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateContent(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *IntotoV001Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { + + if m.Content != nil { + + if err := m.Content.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV001Schema) UnmarshalBinary(b []byte) error { + var res IntotoV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV001SchemaContent intoto v001 schema content +// +// swagger:model IntotoV001SchemaContent +type IntotoV001SchemaContent struct { + + // envelope + Envelope string `json:"envelope,omitempty"` + + // hash + Hash *IntotoV001SchemaContentHash `json:"hash,omitempty"` + + // payload hash + PayloadHash *IntotoV001SchemaContentPayloadHash `json:"payloadHash,omitempty"` +} + +// Validate validates this intoto v001 schema content +func (m *IntotoV001SchemaContent) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePayloadHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IntotoV001SchemaContent) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "hash") + } + + return err + } + } + + return nil +} + +func (m *IntotoV001SchemaContent) validatePayloadHash(formats strfmt.Registry) error { + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if m.PayloadHash != nil { + if err := m.PayloadHash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "payloadHash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this intoto v001 schema content based on the context it is used +func (m *IntotoV001SchemaContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePayloadHash(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *IntotoV001SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "hash") + } + + return err + } + } + + return nil +} + +func (m *IntotoV001SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { + + if m.PayloadHash != nil { + + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "payloadHash") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV001SchemaContent) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV001SchemaContent) UnmarshalBinary(b []byte) error { + var res IntotoV001SchemaContent + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV001SchemaContentHash Specifies the hash algorithm and value encompassing the entire signed envelope; this is computed by the rekor server, client-provided values are ignored +// +// swagger:model IntotoV001SchemaContentHash +type IntotoV001SchemaContentHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the archive + // Required: true + Value *string `json:"value"` +} + +// Validate validates this intoto v001 schema content hash +func (m *IntotoV001SchemaContentHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var intotoV001SchemaContentHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + intotoV001SchemaContentHashTypeAlgorithmPropEnum = append(intotoV001SchemaContentHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // IntotoV001SchemaContentHashAlgorithmSha256 captures enum value "sha256" + IntotoV001SchemaContentHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *IntotoV001SchemaContentHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, intotoV001SchemaContentHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *IntotoV001SchemaContentHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("content"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *IntotoV001SchemaContentHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this intoto v001 schema content hash based on the context it is used +func (m *IntotoV001SchemaContentHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV001SchemaContentHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV001SchemaContentHash) UnmarshalBinary(b []byte) error { + var res IntotoV001SchemaContentHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV001SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope; this is computed by the rekor server, client-provided values are ignored +// +// swagger:model IntotoV001SchemaContentPayloadHash +type IntotoV001SchemaContentPayloadHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the envelope's payload + // Required: true + Value *string `json:"value"` +} + +// Validate validates this intoto v001 schema content payload hash +func (m *IntotoV001SchemaContentPayloadHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum = append(intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // IntotoV001SchemaContentPayloadHashAlgorithmSha256 captures enum value "sha256" + IntotoV001SchemaContentPayloadHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *IntotoV001SchemaContentPayloadHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *IntotoV001SchemaContentPayloadHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("content"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *IntotoV001SchemaContentPayloadHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this intoto v001 schema content payload hash based on the context it is used +func (m *IntotoV001SchemaContentPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV001SchemaContentPayloadHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV001SchemaContentPayloadHash) UnmarshalBinary(b []byte) error { + var res IntotoV001SchemaContentPayloadHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go new file mode 100644 index 00000000000..4ea4dcc58a5 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go @@ -0,0 +1,798 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// IntotoV002Schema intoto v0.0.2 Schema +// +// # Schema for intoto object +// +// swagger:model intotoV002Schema +type IntotoV002Schema struct { + + // content + // Required: true + Content *IntotoV002SchemaContent `json:"content"` +} + +// Validate validates this intoto v002 schema +func (m *IntotoV002Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IntotoV002Schema) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("content", "body", m.Content); err != nil { + return err + } + + if m.Content != nil { + if err := m.Content.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this intoto v002 schema based on the context it is used +func (m *IntotoV002Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateContent(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IntotoV002Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { + + if m.Content != nil { + + if err := m.Content.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV002Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV002Schema) UnmarshalBinary(b []byte) error { + var res IntotoV002Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV002SchemaContent intoto v002 schema content +// +// swagger:model IntotoV002SchemaContent +type IntotoV002SchemaContent struct { + + // envelope + // Required: true + Envelope *IntotoV002SchemaContentEnvelope `json:"envelope"` + + // hash + Hash *IntotoV002SchemaContentHash `json:"hash,omitempty"` + + // payload hash + PayloadHash *IntotoV002SchemaContentPayloadHash `json:"payloadHash,omitempty"` +} + +// Validate validates this intoto v002 schema content +func (m *IntotoV002SchemaContent) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEnvelope(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePayloadHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *IntotoV002SchemaContent) validateEnvelope(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"envelope", "body", m.Envelope); err != nil { + return err + } + + if m.Envelope != nil { + if err := m.Envelope.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "envelope") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "envelope") + } + + return err + } + } + + return nil +} + +func (m *IntotoV002SchemaContent) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "hash") + } + + return err + } + } + + return nil +} + +func (m *IntotoV002SchemaContent) validatePayloadHash(formats strfmt.Registry) error { + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if m.PayloadHash != nil { + if err := m.PayloadHash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "payloadHash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this intoto v002 schema content based on the context it is used +func (m *IntotoV002SchemaContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateEnvelope(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePayloadHash(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IntotoV002SchemaContent) contextValidateEnvelope(ctx context.Context, formats strfmt.Registry) error { + + if m.Envelope != nil { + + if err := m.Envelope.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "envelope") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "envelope") + } + + return err + } + } + + return nil +} + +func (m *IntotoV002SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." 
+ "hash") + } + + return err + } + } + + return nil +} + +func (m *IntotoV002SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { + + if m.PayloadHash != nil { + + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "payloadHash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "payloadHash") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV002SchemaContent) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV002SchemaContent) UnmarshalBinary(b []byte) error { + var res IntotoV002SchemaContent + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV002SchemaContentEnvelope dsse envelope +// +// swagger:model IntotoV002SchemaContentEnvelope +type IntotoV002SchemaContentEnvelope struct { + + // payload of the envelope + // Format: byte + Payload strfmt.Base64 `json:"payload,omitempty"` + + // type describing the payload + // Required: true + PayloadType *string `json:"payloadType"` + + // collection of all signatures of the envelope's payload + // Required: true + // Min Items: 1 + Signatures []*IntotoV002SchemaContentEnvelopeSignaturesItems0 `json:"signatures"` +} + +// Validate validates this intoto v002 schema content envelope +func (m *IntotoV002SchemaContentEnvelope) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePayloadType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSignatures(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IntotoV002SchemaContentEnvelope) validatePayloadType(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"envelope"+"."+"payloadType", "body", m.PayloadType); err != nil { + return err + } + + return nil +} + +func (m *IntotoV002SchemaContentEnvelope) validateSignatures(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"envelope"+"."+"signatures", "body", m.Signatures); err != nil { + return err + } + + iSignaturesSize := int64(len(m.Signatures)) + + if err := validate.MinItems("content"+"."+"envelope"+"."+"signatures", "body", iSignaturesSize, 1); err != nil { + return err + } + + for i := 0; i < len(m.Signatures); i++ { + if swag.IsZero(m.Signatures[i]) { // not required + continue + } + + if m.Signatures[i] != nil { + if err := m.Signatures[i].Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i)) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." 
+ strconv.Itoa(i)) + } + + return err + } + } + + } + + return nil +} + +// ContextValidate validate this intoto v002 schema content envelope based on the context it is used +func (m *IntotoV002SchemaContentEnvelope) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateSignatures(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *IntotoV002SchemaContentEnvelope) contextValidateSignatures(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Signatures); i++ { + + if m.Signatures[i] != nil { + + if swag.IsZero(m.Signatures[i]) { // not required + return nil + } + + if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i)) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i)) + } + + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV002SchemaContentEnvelope) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV002SchemaContentEnvelope) UnmarshalBinary(b []byte) error { + var res IntotoV002SchemaContentEnvelope + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV002SchemaContentEnvelopeSignaturesItems0 a signature of the envelope's payload along with the public key for the signature +// +// swagger:model IntotoV002SchemaContentEnvelopeSignaturesItems0 +type IntotoV002SchemaContentEnvelopeSignaturesItems0 struct { + + // optional id of the key used to create the signature + Keyid string `json:"keyid,omitempty"` + + // public key that corresponds to this signature + // Required: true + // Format: byte + PublicKey *strfmt.Base64 `json:"publicKey"` + + // signature of the payload + // Required: true + // Format: byte + Sig *strfmt.Base64 `json:"sig"` +} + +// Validate validates this intoto v002 schema content envelope signatures items0 +func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { + return err + } + + return nil +} + +func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validateSig(formats strfmt.Registry) error { + + if err := validate.Required("sig", "body", m.Sig); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this intoto v002 schema content envelope signatures items0 based on context it is used +func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) UnmarshalBinary(b []byte) error { + var res IntotoV002SchemaContentEnvelopeSignaturesItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV002SchemaContentHash Specifies the hash algorithm and value encompassing the entire signed envelope +// +// swagger:model IntotoV002SchemaContentHash +type IntotoV002SchemaContentHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the archive + // Required: true + Value *string `json:"value"` +} + +// Validate validates this intoto v002 schema content hash +func (m *IntotoV002SchemaContentHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var intotoV002SchemaContentHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + intotoV002SchemaContentHashTypeAlgorithmPropEnum = append(intotoV002SchemaContentHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // IntotoV002SchemaContentHashAlgorithmSha256 captures enum value "sha256" + IntotoV002SchemaContentHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *IntotoV002SchemaContentHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, intotoV002SchemaContentHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *IntotoV002SchemaContentHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("content"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *IntotoV002SchemaContentHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this intoto v002 schema content hash based on the context it is used +func (m *IntotoV002SchemaContentHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV002SchemaContentHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV002SchemaContentHash) UnmarshalBinary(b []byte) error { + var res IntotoV002SchemaContentHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// IntotoV002SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope +// +// swagger:model IntotoV002SchemaContentPayloadHash +type IntotoV002SchemaContentPayloadHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value of the payload + // Required: true + Value *string `json:"value"` +} + +// Validate validates this intoto v002 schema content payload hash +func (m *IntotoV002SchemaContentPayloadHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum = append(intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // IntotoV002SchemaContentPayloadHashAlgorithmSha256 captures enum value "sha256" + IntotoV002SchemaContentPayloadHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *IntotoV002SchemaContentPayloadHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *IntotoV002SchemaContentPayloadHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("content"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *IntotoV002SchemaContentPayloadHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this intoto v002 schema content payload hash based on the context it is used +func (m *IntotoV002SchemaContentPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV002SchemaContentPayloadHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV002SchemaContentPayloadHash) UnmarshalBinary(b []byte) error { + var res IntotoV002SchemaContentPayloadHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go new file mode 100644 index 00000000000..3df3d21b8a4 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Jar Java Archive (JAR) +// +// swagger:model jar +type Jar struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec JarSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *Jar) Kind() string { + return "jar" +} + +// SetKind sets the kind of this subtype +func (m *Jar) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *Jar) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec JarSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result Jar + + if base.Kind != result.Kind() { + /* Not the type we're looking for. */ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Jar) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec JarSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this jar +func (m *Jar) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Jar) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Jar) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this jar based on the context it is used +func (m *Jar) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Jar) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Jar) UnmarshalBinary(b []byte) error { + var res Jar + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go new file mode 100644 index 00000000000..0cd3126ef51 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// JarSchema JAR Schema +// +// # Schema for JAR objects +// +// swagger:model jarSchema +type JarSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go new file mode 100644 index 00000000000..64335c36879 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go @@ -0,0 +1,602 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
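The Jar type defined above is polymorphic: its generated UnmarshalJSON reads the "kind" discriminator and returns a 422 error for anything other than "jar", while Validate checks apiVersion against the semver pattern. A sketch with an illustrative document:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	// Illustrative entry; the generated UnmarshalJSON rejects any document
	// whose "kind" discriminator is not "jar".
	raw := []byte(`{"kind":"jar","apiVersion":"0.0.1","spec":{}}`)

	var entry models.Jar
	if err := json.Unmarshal(raw, &entry); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	// apiVersion must match the semver pattern; spec must be non-nil.
	if err := entry.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid:", err)
		return
	}
	fmt.Println("kind:", entry.Kind(), "apiVersion:", *entry.APIVersion)
}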
+// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// JarV001Schema JAR v0.0.1 Schema +// +// # Schema for JAR entries +// +// swagger:model jarV001Schema +type JarV001Schema struct { + + // archive + // Required: true + Archive *JarV001SchemaArchive `json:"archive"` + + // signature + Signature *JarV001SchemaSignature `json:"signature,omitempty"` +} + +// Validate validates this jar v001 schema +func (m *JarV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateArchive(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSignature(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *JarV001Schema) validateArchive(formats strfmt.Registry) error { + + if err := validate.Required("archive", "body", m.Archive); err != nil { + return err + } + + if m.Archive != nil { + if err := m.Archive.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("archive") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("archive") + } + + return err + } + } + + return nil +} + +func (m *JarV001Schema) validateSignature(formats strfmt.Registry) error { + if swag.IsZero(m.Signature) { // not required + return nil + } + + if m.Signature != nil { + if err := m.Signature.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this jar v001 schema based on the context it is used +func (m *JarV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateArchive(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateSignature(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *JarV001Schema) contextValidateArchive(ctx context.Context, formats strfmt.Registry) error { + + if m.Archive != nil { + + if err := m.Archive.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("archive") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("archive") + } + + return err + } + } + + return nil +} + +func (m *JarV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { + + if m.Signature != nil { + + if swag.IsZero(m.Signature) { // not required + return nil + } + + if err := m.Signature.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *JarV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *JarV001Schema) UnmarshalBinary(b []byte) error { + var res JarV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// JarV001SchemaArchive Information about the archive associated with the entry +// +// swagger:model JarV001SchemaArchive +type JarV001SchemaArchive struct { + + // Specifies the archive inline within the document + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` + + // hash + Hash *JarV001SchemaArchiveHash `json:"hash,omitempty"` +} + +// Validate validates this jar v001 schema archive +func (m *JarV001SchemaArchive) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *JarV001SchemaArchive) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("archive" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("archive" + "." + "hash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this jar v001 schema archive based on the context it is used +func (m *JarV001SchemaArchive) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *JarV001SchemaArchive) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("archive" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("archive" + "." 
+ "hash") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *JarV001SchemaArchive) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *JarV001SchemaArchive) UnmarshalBinary(b []byte) error { + var res JarV001SchemaArchive + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// JarV001SchemaArchiveHash Specifies the hash algorithm and value encompassing the entire signed archive +// +// swagger:model JarV001SchemaArchiveHash +type JarV001SchemaArchiveHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the archive + // Required: true + Value *string `json:"value"` +} + +// Validate validates this jar v001 schema archive hash +func (m *JarV001SchemaArchiveHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var jarV001SchemaArchiveHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + jarV001SchemaArchiveHashTypeAlgorithmPropEnum = append(jarV001SchemaArchiveHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // JarV001SchemaArchiveHashAlgorithmSha256 captures enum value "sha256" + JarV001SchemaArchiveHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *JarV001SchemaArchiveHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, jarV001SchemaArchiveHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *JarV001SchemaArchiveHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("archive"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("archive"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *JarV001SchemaArchiveHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("archive"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this jar v001 schema archive hash based on context it is used +func (m *JarV001SchemaArchiveHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *JarV001SchemaArchiveHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *JarV001SchemaArchiveHash) UnmarshalBinary(b []byte) error { + var res JarV001SchemaArchiveHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// JarV001SchemaSignature Information about the included signature in the JAR file +// +// swagger:model JarV001SchemaSignature +type JarV001SchemaSignature struct { + + // Specifies the PKCS7 signature embedded within the JAR file + // Required: true + 
// Read Only: true + // Format: byte + Content strfmt.Base64 `json:"content"` + + // public key + // Required: true + PublicKey *JarV001SchemaSignaturePublicKey `json:"publicKey"` +} + +// Validate validates this jar v001 schema signature +func (m *JarV001SchemaSignature) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *JarV001SchemaSignature) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("signature"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +func (m *JarV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("signature"+"."+"publicKey", "body", m.PublicKey); err != nil { + return err + } + + if m.PublicKey != nil { + if err := m.PublicKey.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature" + "." + "publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature" + "." + "publicKey") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this jar v001 schema signature based on the context it is used +func (m *JarV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateContent(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePublicKey(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *JarV001SchemaSignature) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { + + if err := validate.ReadOnly(ctx, "signature"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +func (m *JarV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { + + if m.PublicKey != nil { + + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("signature" + "." + "publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("signature" + "." 
+ "publicKey") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *JarV001SchemaSignature) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *JarV001SchemaSignature) UnmarshalBinary(b []byte) error { + var res JarV001SchemaSignature + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// JarV001SchemaSignaturePublicKey The X509 certificate containing the public key JAR which verifies the signature of the JAR +// +// swagger:model JarV001SchemaSignaturePublicKey +type JarV001SchemaSignaturePublicKey struct { + + // Specifies the content of the X509 certificate containing the public key used to verify the signature + // Required: true + // Format: byte + Content *strfmt.Base64 `json:"content"` +} + +// Validate validates this jar v001 schema signature public key +func (m *JarV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *JarV001SchemaSignaturePublicKey) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("signature"+"."+"publicKey"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this jar v001 schema signature public key based on the context it is used +func (m *JarV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *JarV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *JarV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error { + var res JarV001SchemaSignaturePublicKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go new file mode 100644 index 00000000000..65cf4f4b77e --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go @@ -0,0 +1,474 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	stderrors "errors"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// LogEntry log entry
+//
+// swagger:model LogEntry
+type LogEntry map[string]LogEntryAnon
+
+// Validate validates this log entry
+func (m LogEntry) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	for k := range m {
+
+		if swag.IsZero(m[k]) { // not required
+			continue
+		}
+		if val, ok := m[k]; ok {
+			if err := val.Validate(formats); err != nil {
+				ve := new(errors.Validation)
+				if stderrors.As(err, &ve) {
+					return ve.ValidateName(k)
+				}
+				ce := new(errors.CompositeError)
+				if stderrors.As(err, &ce) {
+					return ce.ValidateName(k)
+				}
+
+				return err
+			}
+		}
+
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+// ContextValidate validate this log entry based on the context it is used
+func (m LogEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	for k := range m {
+
+		if val, ok := m[k]; ok {
+			if err := val.ContextValidate(ctx, formats); err != nil {
+				return err
+			}
+		}
+
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+// LogEntryAnon log entry anon
+//
+// swagger:model LogEntryAnon
+type LogEntryAnon struct {
+
+	// attestation
+	Attestation *LogEntryAnonAttestation `json:"attestation,omitempty"`
+
+	// body
+	// Required: true
+	Body any `json:"body"`
+
+	// The time the entry was added to the log as a Unix timestamp in seconds
+	// Required: true
+	IntegratedTime *int64 `json:"integratedTime"`
+
+	// This is the SHA256 hash of the DER-encoded public key for the log at the time the entry was included in the log
+	// Required: true
+	// Pattern: ^[0-9a-fA-F]{64}$
+	LogID *string `json:"logID"`
+
+	// log index
+	// Required: true
+	// Minimum: 0
+	LogIndex *int64 `json:"logIndex"`
+
+	// verification
+	Verification *LogEntryAnonVerification `json:"verification,omitempty"`
+}
+
+// Validate validates this log entry anon
+func (m *LogEntryAnon) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateAttestation(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateBody(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateIntegratedTime(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateLogID(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateLogIndex(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateVerification(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *LogEntryAnon) validateAttestation(formats strfmt.Registry) error {
+	if swag.IsZero(m.Attestation) { // not required
+		return nil
+	}
+
+	if m.Attestation != nil {
+		if err := m.Attestation.Validate(formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("attestation")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("attestation")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (m *LogEntryAnon) validateBody(formats strfmt.Registry) error {
+
+	if m.Body == nil {
+		return errors.Required("body", "body", nil)
+	}
+
+	return nil
+}
+
+func (m *LogEntryAnon) validateIntegratedTime(formats strfmt.Registry) error {
+
+	if err := validate.Required("integratedTime", "body", m.IntegratedTime); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *LogEntryAnon) validateLogID(formats strfmt.Registry) error {
+
+	if err := validate.Required("logID", "body", m.LogID); err != nil {
+		return err
+	}
+
+	if err := validate.Pattern("logID", "body", *m.LogID, `^[0-9a-fA-F]{64}$`); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *LogEntryAnon) validateLogIndex(formats strfmt.Registry) error {
+
+	if err := validate.Required("logIndex", "body", m.LogIndex); err != nil {
+		return err
+	}
+
+	if err := validate.MinimumInt("logIndex", "body", *m.LogIndex, 0, false); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *LogEntryAnon) validateVerification(formats strfmt.Registry) error {
+	if swag.IsZero(m.Verification) { // not required
+		return nil
+	}
+
+	if m.Verification != nil {
+		if err := m.Verification.Validate(formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("verification")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("verification")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ContextValidate validate this log entry anon based on the context it is used
+func (m *LogEntryAnon) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.contextValidateAttestation(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.contextValidateVerification(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *LogEntryAnon) contextValidateAttestation(ctx context.Context, formats strfmt.Registry) error {
+
+	if m.Attestation != nil {
+
+		if swag.IsZero(m.Attestation) { // not required
+			return nil
+		}
+
+		if err := m.Attestation.ContextValidate(ctx, formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("attestation")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("attestation")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (m *LogEntryAnon) contextValidateVerification(ctx context.Context, formats strfmt.Registry) error {
+
+	if m.Verification != nil {
+
+		if swag.IsZero(m.Verification) { // not required
+			return nil
+		}
+
+		if err := m.Verification.ContextValidate(ctx, formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("verification")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("verification")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *LogEntryAnon) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *LogEntryAnon) UnmarshalBinary(b []byte) error {
+	var res LogEntryAnon
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
+
+// LogEntryAnonAttestation log entry anon attestation
+//
+// swagger:model LogEntryAnonAttestation
+type LogEntryAnonAttestation struct {
+
+	// data
+	// Format: byte
+	Data strfmt.Base64 `json:"data,omitempty"`
+}
+
+// Validate validates this log entry anon attestation
+func (m *LogEntryAnonAttestation) Validate(formats strfmt.Registry) error {
+	return nil
+}
+
+// ContextValidate validates this log entry anon attestation based on context it is used
+func (m *LogEntryAnonAttestation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *LogEntryAnonAttestation) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *LogEntryAnonAttestation) UnmarshalBinary(b []byte) error {
+	var res LogEntryAnonAttestation
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
+
+// LogEntryAnonVerification log entry anon verification
+//
+// swagger:model LogEntryAnonVerification
+type LogEntryAnonVerification struct {
+
+	// inclusion proof
+	InclusionProof *InclusionProof `json:"inclusionProof,omitempty"`
+
+	// Signature over the logID, logIndex, body and integratedTime.
+	// Format: byte
+	SignedEntryTimestamp strfmt.Base64 `json:"signedEntryTimestamp,omitempty"`
+}
+
+// Validate validates this log entry anon verification
+func (m *LogEntryAnonVerification) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateInclusionProof(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *LogEntryAnonVerification) validateInclusionProof(formats strfmt.Registry) error {
+	if swag.IsZero(m.InclusionProof) { // not required
+		return nil
+	}
+
+	if m.InclusionProof != nil {
+		if err := m.InclusionProof.Validate(formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("verification" + "." + "inclusionProof")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("verification" + "." + "inclusionProof")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ContextValidate validate this log entry anon verification based on the context it is used
+func (m *LogEntryAnonVerification) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.contextValidateInclusionProof(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *LogEntryAnonVerification) contextValidateInclusionProof(ctx context.Context, formats strfmt.Registry) error {
+
+	if m.InclusionProof != nil {
+
+		if swag.IsZero(m.InclusionProof) { // not required
+			return nil
+		}
+
+		if err := m.InclusionProof.ContextValidate(ctx, formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("verification" + "." + "inclusionProof")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("verification" + "." + "inclusionProof")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *LogEntryAnonVerification) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *LogEntryAnonVerification) UnmarshalBinary(b []byte) error {
+	var res LogEntryAnonVerification
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
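For reference, a minimal sketch (not part of the diff) of how the generated LogEntry model above is typically consumed. The map key and all field values are placeholders, and the import path is the vendored package this diff adds; the swag helpers build the pointer-typed required fields:

package main

import (
	"fmt"
	"strings"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"

	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	// LogEntry is a map keyed by the entry's UUID (placeholder key here).
	entry := models.LogEntry{
		"placeholder-entry-uuid": models.LogEntryAnon{
			Body:           "eyJraW5kIjoicmVrb3JkIn0=", // any non-nil value passes the required check
			IntegratedTime: swag.Int64(1610000000),
			LogID:          swag.String(strings.Repeat("a", 64)), // must match ^[0-9a-fA-F]{64}$
			LogIndex:       swag.Int64(42),
		},
	}

	// Validate walks every key and enforces the Required/Pattern/Minimum
	// annotations shown in the struct comments above.
	if err := entry.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid:", err)
		return
	}
	fmt.Println("entry validates")
}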
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go
new file mode 100644
index 00000000000..6cbb9d64a2f
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go
@@ -0,0 +1,230 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	stderrors "errors"
+	"strconv"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// LogInfo log info
+//
+// swagger:model LogInfo
+type LogInfo struct {
+
+	// inactive shards
+	InactiveShards []*InactiveShardLogInfo `json:"inactiveShards"`
+
+	// The current hash value stored at the root of the merkle tree
+	// Required: true
+	// Pattern: ^[0-9a-fA-F]{64}$
+	RootHash *string `json:"rootHash"`
+
+	// The current signed tree head
+	// Required: true
+	SignedTreeHead *string `json:"signedTreeHead"`
+
+	// The current treeID
+	// Required: true
+	// Pattern: ^[0-9]+$
+	TreeID *string `json:"treeID"`
+
+	// The current number of nodes in the merkle tree
+	// Required: true
+	// Minimum: 1
+	TreeSize *int64 `json:"treeSize"`
+}
+
+// Validate validates this log info
+func (m *LogInfo) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateInactiveShards(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateRootHash(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateSignedTreeHead(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateTreeID(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateTreeSize(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *LogInfo) validateInactiveShards(formats strfmt.Registry) error {
+	if swag.IsZero(m.InactiveShards) { // not required
+		return nil
+	}
+
+	for i := 0; i < len(m.InactiveShards); i++ {
+		if swag.IsZero(m.InactiveShards[i]) { // not required
+			continue
+		}
+
+		if m.InactiveShards[i] != nil {
+			if err := m.InactiveShards[i].Validate(formats); err != nil {
+				ve := new(errors.Validation)
+				if stderrors.As(err, &ve) {
+					return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i))
+				}
+				ce := new(errors.CompositeError)
+				if stderrors.As(err, &ce) {
+					return ce.ValidateName("inactiveShards" + "." + strconv.Itoa(i))
+				}
+
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
+func (m *LogInfo) validateRootHash(formats strfmt.Registry) error {
+
+	if err := validate.Required("rootHash", "body", m.RootHash); err != nil {
+		return err
+	}
+
+	if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *LogInfo) validateSignedTreeHead(formats strfmt.Registry) error {
+
+	if err := validate.Required("signedTreeHead", "body", m.SignedTreeHead); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *LogInfo) validateTreeID(formats strfmt.Registry) error {
+
+	if err := validate.Required("treeID", "body", m.TreeID); err != nil {
+		return err
+	}
+
+	if err := validate.Pattern("treeID", "body", *m.TreeID, `^[0-9]+$`); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *LogInfo) validateTreeSize(formats strfmt.Registry) error {
+
+	if err := validate.Required("treeSize", "body", m.TreeSize); err != nil {
+		return err
+	}
+
+	if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ContextValidate validate this log info based on the context it is used
+func (m *LogInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.contextValidateInactiveShards(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *LogInfo) contextValidateInactiveShards(ctx context.Context, formats strfmt.Registry) error {
+
+	for i := 0; i < len(m.InactiveShards); i++ {
+
+		if m.InactiveShards[i] != nil {
+
+			if swag.IsZero(m.InactiveShards[i]) { // not required
+				return nil
+			}
+
+			if err := m.InactiveShards[i].ContextValidate(ctx, formats); err != nil {
+				ve := new(errors.Validation)
+				if stderrors.As(err, &ve) {
+					return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i))
+				}
+				ce := new(errors.CompositeError)
+				if stderrors.As(err, &ce) {
+					return ce.ValidateName("inactiveShards" + "." + strconv.Itoa(i))
+				}
+
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *LogInfo) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *LogInfo) UnmarshalBinary(b []byte) error {
+	var res LogInfo
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
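A quick illustration (again not part of the diff) of how the required-field pointers behave: leaving the required fields nil and violating the Minimum makes Validate return a composite error that aggregates every failure rather than stopping at the first one. Types and helpers are the same assumed imports as above:

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"

	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	// rootHash, signedTreeHead and treeID are left nil, and treeSize
	// violates Minimum: 1, so four errors come back in one composite.
	info := models.LogInfo{TreeSize: swag.Int64(0)}
	if err := info.Validate(strfmt.Default); err != nil {
		fmt.Println(err) // lists all failing fields, not just the first
	}
}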
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go
new file mode 100644
index 00000000000..5b734a5fff3
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go
@@ -0,0 +1,195 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"io"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/validate"
+)
+
+// ProposedEntry proposed entry
+//
+// swagger:discriminator ProposedEntry kind
+type ProposedEntry interface {
+	runtime.Validatable
+	runtime.ContextValidatable
+
+	// kind
+	// Required: true
+	Kind() string
+	SetKind(string)
+
+	// AdditionalProperties in base type should be handled just like regular properties
+	// At this moment, the base type property is pushed down to the subtype
+}
+
+type proposedEntry struct {
+	kindField string
+}
+
+// Kind gets the kind of this polymorphic type
+func (m *proposedEntry) Kind() string {
+	return "ProposedEntry"
+}
+
+// SetKind sets the kind of this polymorphic type
+func (m *proposedEntry) SetKind(val string) {
+}
+
+// UnmarshalProposedEntrySlice unmarshals polymorphic slices of ProposedEntry
+func UnmarshalProposedEntrySlice(reader io.Reader, consumer runtime.Consumer) ([]ProposedEntry, error) {
+	var elements []json.RawMessage
+	if err := consumer.Consume(reader, &elements); err != nil {
+		return nil, err
+	}
+
+	var result []ProposedEntry
+	for _, element := range elements {
+		obj, err := unmarshalProposedEntry(element, consumer)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, obj)
+	}
+	return result, nil
+}
+
+// UnmarshalProposedEntry unmarshals polymorphic ProposedEntry
+func UnmarshalProposedEntry(reader io.Reader, consumer runtime.Consumer) (ProposedEntry, error) {
+	// we need to read this twice, so first into a buffer
+	data, err := io.ReadAll(reader)
+	if err != nil {
+		return nil, err
+	}
+	return unmarshalProposedEntry(data, consumer)
+}
+
+func unmarshalProposedEntry(data []byte, consumer runtime.Consumer) (ProposedEntry, error) {
+	buf := bytes.NewBuffer(data)
+	buf2 := bytes.NewBuffer(data)
+
+	// the first time this is read is to fetch the value of the kind property.
+	var getType struct {
+		Kind string `json:"kind"`
+	}
+	if err := consumer.Consume(buf, &getType); err != nil {
+		return nil, err
+	}
+
+	if err := validate.RequiredString("kind", "body", getType.Kind); err != nil {
+		return nil, err
+	}
+
+	// The value of kind is used to determine which type to create and unmarshal the data into
+	switch getType.Kind {
+	case "ProposedEntry":
+		var result proposedEntry
+		if err := consumer.Consume(buf2, &result); err != nil {
+			return nil, err
+		}
+		return &result, nil
+	case "alpine":
+		var result Alpine
+		if err := consumer.Consume(buf2, &result); err != nil {
+			return nil, err
+		}
+		return &result, nil
+	case "cose":
+		var result Cose
+		if err := consumer.Consume(buf2, &result); err != nil {
+			return nil, err
+		}
+		return &result, nil
+	case "dsse":
+		var result DSSE
+		if err := consumer.Consume(buf2, &result); err != nil {
+			return nil, err
+		}
+		return &result, nil
+	case "hashedrekord":
+		var result Hashedrekord
+		if err := consumer.Consume(buf2, &result); err != nil {
+			return nil, err
+		}
+		return &result, nil
+	case "helm":
+		var result Helm
+		if err := consumer.Consume(buf2, &result); err != nil {
+			return nil, err
+		}
+		return &result, nil
+	case "intoto":
+		var result Intoto
+		if err := consumer.Consume(buf2, &result); err != nil {
+			return nil, err
+		}
+		return &result, nil
+	case "jar":
+		var result Jar
+		if err := consumer.Consume(buf2, &result); err != nil {
+			return nil, err
+		}
+		return &result, nil
+	case "rekord":
+		var result Rekord
+		if err := consumer.Consume(buf2, &result); err != nil {
+			return nil, err
+		}
+		return &result, nil
+	case "rfc3161":
+		var result Rfc3161
+		if err := consumer.Consume(buf2, &result); err != nil {
+			return nil, err
+		}
+		return &result, nil
+	case "rpm":
+		var result Rpm
+		if err := consumer.Consume(buf2, &result); err != nil {
+			return nil, err
+		}
+		return &result, nil
+	case "tuf":
+		var result TUF
+		if err := consumer.Consume(buf2, &result); err != nil {
+			return nil, err
+		}
+		return &result, nil
+	}
+	return nil, errors.New(422, "invalid kind value: %q", getType.Kind)
+}
+
+// Validate validates this proposed entry
+func (m *proposedEntry) Validate(formats strfmt.Registry) error {
+	return nil
+}
+
+// ContextValidate validates this proposed entry based on context it is used
+func (m *proposedEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	return nil
+}
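To see the discriminator in action, here is a hedged sketch of round-tripping a proposed entry through UnmarshalProposedEntry. The payload is a fabricated minimal document, and runtime.JSONConsumer is the stock go-openapi JSON consumer:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/go-openapi/runtime"

	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	// The "kind" field selects the concrete type; "rekord" maps to *models.Rekord.
	payload := []byte(`{"kind":"rekord","apiVersion":"0.0.1","spec":{}}`)

	pe, err := models.UnmarshalProposedEntry(bytes.NewReader(payload), runtime.JSONConsumer())
	if err != nil {
		log.Fatal(err)
	}

	// Type-assert (or type-switch) on the interface to reach subtype fields.
	if r, ok := pe.(*models.Rekord); ok {
		fmt.Println(r.Kind(), *r.APIVersion) // rekord 0.0.1
	}
}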
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// Rekord Rekord object
+//
+// swagger:model rekord
+type Rekord struct {
+
+	// api version
+	// Required: true
+	// Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+	APIVersion *string `json:"apiVersion"`
+
+	// spec
+	// Required: true
+	Spec RekordSchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *Rekord) Kind() string {
+	return "rekord"
+}
+
+// SetKind sets the kind of this subtype
+func (m *Rekord) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *Rekord) UnmarshalJSON(raw []byte) error {
+	var data struct {
+
+		// api version
+		// Required: true
+		// Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+		APIVersion *string `json:"apiVersion"`
+
+		// spec
+		// Required: true
+		Spec RekordSchema `json:"spec"`
+	}
+	buf := bytes.NewBuffer(raw)
+	dec := json.NewDecoder(buf)
+	dec.UseNumber()
+
+	if err := dec.Decode(&data); err != nil {
+		return err
+	}
+
+	var base struct {
+		/* Just the base type fields. Used for unmarshalling polymorphic types.*/
+
+		Kind string `json:"kind"`
+	}
+	buf = bytes.NewBuffer(raw)
+	dec = json.NewDecoder(buf)
+	dec.UseNumber()
+
+	if err := dec.Decode(&base); err != nil {
+		return err
+	}
+
+	var result Rekord
+
+	if base.Kind != result.Kind() {
+		/* Not the type we're looking for. */
+		return errors.New(422, "invalid kind value: %q", base.Kind)
+	}
+
+	result.APIVersion = data.APIVersion
+	result.Spec = data.Spec
+
+	*m = result
+
+	return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m Rekord) MarshalJSON() ([]byte, error) {
+	var b1, b2, b3 []byte
+	var err error
+	b1, err = json.Marshal(struct {
+
+		// api version
+		// Required: true
+		// Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+		APIVersion *string `json:"apiVersion"`
+
+		// spec
+		// Required: true
+		Spec RekordSchema `json:"spec"`
+	}{
+
+		APIVersion: m.APIVersion,
+
+		Spec: m.Spec,
+	})
+	if err != nil {
+		return nil, err
+	}
+	b2, err = json.Marshal(struct {
+		Kind string `json:"kind"`
+	}{
+
+		Kind: m.Kind(),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this rekord
+func (m *Rekord) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateAPIVersion(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateSpec(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *Rekord) validateAPIVersion(formats strfmt.Registry) error {
+
+	if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+		return err
+	}
+
+	if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *Rekord) validateSpec(formats strfmt.Registry) error {
+
+	if m.Spec == nil {
+		return errors.Required("spec", "body", nil)
+	}
+
+	return nil
+}
+
+// ContextValidate validate this rekord based on the context it is used
+func (m *Rekord) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Rekord) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Rekord) UnmarshalBinary(b []byte) error {
+	var res Rekord
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go
new file mode 100644
index 00000000000..9c33e4044e9
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// RekordSchema Rekor Schema
+//
+// # Schema for Rekord objects
+//
+// swagger:model rekordSchema
+type RekordSchema any
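The inverse direction, sketched under the same assumptions: Rekord.MarshalJSON re-attaches the discriminator via swag.ConcatJSON, so the struct needs no kind field of its own:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/swag"

	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	r := models.Rekord{
		APIVersion: swag.String("0.0.1"),
		Spec:       map[string]any{}, // RekordSchema is an untyped alias
	}

	b, err := json.Marshal(r) // dispatches to the generated MarshalJSON
	if err != nil {
		log.Fatal(err)
	}
	// {"apiVersion":"0.0.1","spec":{}} merged with {"kind":"rekord"}
	fmt.Println(string(b))
}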
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go
new file mode 100644
index 00000000000..0f4977ca736
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go
@@ -0,0 +1,644 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"encoding/json"
+	stderrors "errors"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// RekordV001Schema Rekor v0.0.1 Schema
+//
+// # Schema for Rekord object
+//
+// swagger:model rekordV001Schema
+type RekordV001Schema struct {
+
+	// data
+	// Required: true
+	Data *RekordV001SchemaData `json:"data"`
+
+	// signature
+	// Required: true
+	Signature *RekordV001SchemaSignature `json:"signature"`
+}
+
+// Validate validates this rekord v001 schema
+func (m *RekordV001Schema) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateData(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateSignature(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *RekordV001Schema) validateData(formats strfmt.Registry) error {
+
+	if err := validate.Required("data", "body", m.Data); err != nil {
+		return err
+	}
+
+	if m.Data != nil {
+		if err := m.Data.Validate(formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("data")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("data")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (m *RekordV001Schema) validateSignature(formats strfmt.Registry) error {
+
+	if err := validate.Required("signature", "body", m.Signature); err != nil {
+		return err
+	}
+
+	if m.Signature != nil {
+		if err := m.Signature.Validate(formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("signature")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("signature")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ContextValidate validate this rekord v001 schema based on the context it is used
+func (m *RekordV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.contextValidateData(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.contextValidateSignature(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *RekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error {
+
+	if m.Data != nil {
+
+		if err := m.Data.ContextValidate(ctx, formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("data")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("data")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (m *RekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error {
+
+	if m.Signature != nil {
+
+		if err := m.Signature.ContextValidate(ctx, formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("signature")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("signature")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RekordV001Schema) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RekordV001Schema) UnmarshalBinary(b []byte) error {
+	var res RekordV001Schema
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
+
+// RekordV001SchemaData Information about the content associated with the entry
+//
+// swagger:model RekordV001SchemaData
+type RekordV001SchemaData struct {
+
+	// Specifies the content inline within the document
+	// Format: byte
+	Content strfmt.Base64 `json:"content,omitempty"`
+
+	// hash
+	Hash *RekordV001SchemaDataHash `json:"hash,omitempty"`
+}
+
+// Validate validates this rekord v001 schema data
+func (m *RekordV001SchemaData) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateHash(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *RekordV001SchemaData) validateHash(formats strfmt.Registry) error {
+	if swag.IsZero(m.Hash) { // not required
+		return nil
+	}
+
+	if m.Hash != nil {
+		if err := m.Hash.Validate(formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("data" + "." + "hash")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("data" + "." + "hash")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ContextValidate validate this rekord v001 schema data based on the context it is used
+func (m *RekordV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.contextValidateHash(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *RekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
+
+	if m.Hash != nil {
+
+		if swag.IsZero(m.Hash) { // not required
+			return nil
+		}
+
+		if err := m.Hash.ContextValidate(ctx, formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("data" + "." + "hash")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("data" + "." + "hash")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RekordV001SchemaData) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RekordV001SchemaData) UnmarshalBinary(b []byte) error {
+	var res RekordV001SchemaData
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
+
+// RekordV001SchemaDataHash Specifies the hash algorithm and value for the content
+//
+// swagger:model RekordV001SchemaDataHash
+type RekordV001SchemaDataHash struct {
+
+	// The hashing function used to compute the hash value
+	// Required: true
+	// Enum: ["sha256"]
+	Algorithm *string `json:"algorithm"`
+
+	// The hash value for the content
+	// Required: true
+	Value *string `json:"value"`
+}
+
+// Validate validates this rekord v001 schema data hash
+func (m *RekordV001SchemaDataHash) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateAlgorithm(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateValue(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+var rekordV001SchemaDataHashTypeAlgorithmPropEnum []any
+
+func init() {
+	var res []string
+	if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+		panic(err)
+	}
+	for _, v := range res {
+		rekordV001SchemaDataHashTypeAlgorithmPropEnum = append(rekordV001SchemaDataHashTypeAlgorithmPropEnum, v)
+	}
+}
+
+const (
+
+	// RekordV001SchemaDataHashAlgorithmSha256 captures enum value "sha256"
+	RekordV001SchemaDataHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *RekordV001SchemaDataHash) validateAlgorithmEnum(path, location string, value string) error {
+	if err := validate.EnumCase(path, location, value, rekordV001SchemaDataHashTypeAlgorithmPropEnum, true); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (m *RekordV001SchemaDataHash) validateAlgorithm(formats strfmt.Registry) error {
+
+	if err := validate.Required("data"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+		return err
+	}
+
+	// value enum
+	if err := m.validateAlgorithmEnum("data"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *RekordV001SchemaDataHash) validateValue(formats strfmt.Registry) error {
+
+	if err := validate.Required("data"+"."+"hash"+"."+"value", "body", m.Value); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ContextValidate validate this rekord v001 schema data hash based on the context it is used
+func (m *RekordV001SchemaDataHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RekordV001SchemaDataHash) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RekordV001SchemaDataHash) UnmarshalBinary(b []byte) error {
+	var res RekordV001SchemaDataHash
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
+
+// RekordV001SchemaSignature Information about the detached signature associated with the entry
+//
+// swagger:model RekordV001SchemaSignature
+type RekordV001SchemaSignature struct {
+
+	// Specifies the content of the signature inline within the document
+	// Required: true
+	// Format: byte
+	Content *strfmt.Base64 `json:"content"`
+
+	// Specifies the format of the signature
+	// Required: true
+	// Enum: ["pgp","minisign","x509","ssh"]
+	Format *string `json:"format"`
+
+	// public key
+	// Required: true
+	PublicKey *RekordV001SchemaSignaturePublicKey `json:"publicKey"`
+}
+
+// Validate validates this rekord v001 schema signature
+func (m *RekordV001SchemaSignature) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateContent(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateFormat(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validatePublicKey(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *RekordV001SchemaSignature) validateContent(formats strfmt.Registry) error {
+
+	if err := validate.Required("signature"+"."+"content", "body", m.Content); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+var rekordV001SchemaSignatureTypeFormatPropEnum []any
+
+func init() {
+	var res []string
+	if err := json.Unmarshal([]byte(`["pgp","minisign","x509","ssh"]`), &res); err != nil {
+		panic(err)
+	}
+	for _, v := range res {
+		rekordV001SchemaSignatureTypeFormatPropEnum = append(rekordV001SchemaSignatureTypeFormatPropEnum, v)
+	}
+}
+
+const (
+
+	// RekordV001SchemaSignatureFormatPgp captures enum value "pgp"
+	RekordV001SchemaSignatureFormatPgp string = "pgp"
+
+	// RekordV001SchemaSignatureFormatMinisign captures enum value "minisign"
+	RekordV001SchemaSignatureFormatMinisign string = "minisign"
+
+	// RekordV001SchemaSignatureFormatX509 captures enum value "x509"
+	RekordV001SchemaSignatureFormatX509 string = "x509"
+
+	// RekordV001SchemaSignatureFormatSSH captures enum value "ssh"
+	RekordV001SchemaSignatureFormatSSH string = "ssh"
+)
+
+// prop value enum
+func (m *RekordV001SchemaSignature) validateFormatEnum(path, location string, value string) error {
+	if err := validate.EnumCase(path, location, value, rekordV001SchemaSignatureTypeFormatPropEnum, true); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (m *RekordV001SchemaSignature) validateFormat(formats strfmt.Registry) error {
+
+	if err := validate.Required("signature"+"."+"format", "body", m.Format); err != nil {
+		return err
+	}
+
+	// value enum
+	if err := m.validateFormatEnum("signature"+"."+"format", "body", *m.Format); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *RekordV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error {
+
+	if err := validate.Required("signature"+"."+"publicKey", "body", m.PublicKey); err != nil {
+		return err
+	}
+
+	if m.PublicKey != nil {
+		if err := m.PublicKey.Validate(formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("signature" + "." + "publicKey")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("signature" + "." + "publicKey")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ContextValidate validate this rekord v001 schema signature based on the context it is used
+func (m *RekordV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.contextValidatePublicKey(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *RekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
+
+	if m.PublicKey != nil {
+
+		if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("signature" + "." + "publicKey")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("signature" + "." + "publicKey")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RekordV001SchemaSignature) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RekordV001SchemaSignature) UnmarshalBinary(b []byte) error {
+	var res RekordV001SchemaSignature
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
+
+// RekordV001SchemaSignaturePublicKey The public key that can verify the signature
+//
+// swagger:model RekordV001SchemaSignaturePublicKey
+type RekordV001SchemaSignaturePublicKey struct {
+
+	// Specifies the content of the public key inline within the document
+	// Required: true
+	// Format: byte
+	Content *strfmt.Base64 `json:"content"`
+}
+
+// Validate validates this rekord v001 schema signature public key
+func (m *RekordV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateContent(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *RekordV001SchemaSignaturePublicKey) validateContent(formats strfmt.Registry) error {
+
+	if err := validate.Required("signature"+"."+"publicKey"+"."+"content", "body", m.Content); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ContextValidate validates this rekord v001 schema signature public key based on context it is used
+func (m *RekordV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RekordV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RekordV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error {
+	var res RekordV001SchemaSignaturePublicKey
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
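A sketch of filling in the versioned schema just defined. All byte and digest values are placeholders; the enum constants and the swag/strfmt helpers are the ones used throughout these generated files:

package main

import (
	"fmt"
	"strings"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"

	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	sig := strfmt.Base64([]byte("placeholder-signature"))
	pub := strfmt.Base64([]byte("placeholder-public-key"))

	spec := models.RekordV001Schema{
		Data: &models.RekordV001SchemaData{
			Hash: &models.RekordV001SchemaDataHash{
				Algorithm: swag.String(models.RekordV001SchemaDataHashAlgorithmSha256),
				Value:     swag.String(strings.Repeat("0", 64)), // placeholder digest
			},
		},
		Signature: &models.RekordV001SchemaSignature{
			Content:   &sig,
			Format:    swag.String(models.RekordV001SchemaSignatureFormatX509),
			PublicKey: &models.RekordV001SchemaSignaturePublicKey{Content: &pub},
		},
	}

	// Required, Enum and nested-struct checks all run from this one call;
	// failures carry dotted paths such as signature.publicKey.content.
	fmt.Println(spec.Validate(strfmt.Default)) // <nil>
}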
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// Rfc3161 RFC3161 Timestamp
+//
+// swagger:model rfc3161
+type Rfc3161 struct {
+
+	// api version
+	// Required: true
+	// Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+	APIVersion *string `json:"apiVersion"`
+
+	// spec
+	// Required: true
+	Spec Rfc3161Schema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *Rfc3161) Kind() string {
+	return "rfc3161"
+}
+
+// SetKind sets the kind of this subtype
+func (m *Rfc3161) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *Rfc3161) UnmarshalJSON(raw []byte) error {
+	var data struct {
+
+		// api version
+		// Required: true
+		// Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+		APIVersion *string `json:"apiVersion"`
+
+		// spec
+		// Required: true
+		Spec Rfc3161Schema `json:"spec"`
+	}
+	buf := bytes.NewBuffer(raw)
+	dec := json.NewDecoder(buf)
+	dec.UseNumber()
+
+	if err := dec.Decode(&data); err != nil {
+		return err
+	}
+
+	var base struct {
+		/* Just the base type fields. Used for unmarshalling polymorphic types.*/
+
+		Kind string `json:"kind"`
+	}
+	buf = bytes.NewBuffer(raw)
+	dec = json.NewDecoder(buf)
+	dec.UseNumber()
+
+	if err := dec.Decode(&base); err != nil {
+		return err
+	}
+
+	var result Rfc3161
+
+	if base.Kind != result.Kind() {
+		/* Not the type we're looking for. */
+		return errors.New(422, "invalid kind value: %q", base.Kind)
+	}
+
+	result.APIVersion = data.APIVersion
+	result.Spec = data.Spec
+
+	*m = result
+
+	return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m Rfc3161) MarshalJSON() ([]byte, error) {
+	var b1, b2, b3 []byte
+	var err error
+	b1, err = json.Marshal(struct {
+
+		// api version
+		// Required: true
+		// Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+		APIVersion *string `json:"apiVersion"`
+
+		// spec
+		// Required: true
+		Spec Rfc3161Schema `json:"spec"`
+	}{
+
+		APIVersion: m.APIVersion,
+
+		Spec: m.Spec,
+	})
+	if err != nil {
+		return nil, err
+	}
+	b2, err = json.Marshal(struct {
+		Kind string `json:"kind"`
+	}{
+
+		Kind: m.Kind(),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this rfc3161
+func (m *Rfc3161) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateAPIVersion(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateSpec(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *Rfc3161) validateAPIVersion(formats strfmt.Registry) error {
+
+	if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+		return err
+	}
+
+	if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *Rfc3161) validateSpec(formats strfmt.Registry) error {
+
+	if m.Spec == nil {
+		return errors.Required("spec", "body", nil)
+	}
+
+	return nil
+}
+
+// ContextValidate validate this rfc3161 based on the context it is used
+func (m *Rfc3161) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Rfc3161) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Rfc3161) UnmarshalBinary(b []byte) error {
+	var res Rfc3161
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go
new file mode 100644
index 00000000000..319358d400c
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Rfc3161Schema Timestamp Schema
+//
+// # Schema for RFC 3161 timestamp objects
+//
+// swagger:model rfc3161Schema
+type Rfc3161Schema any
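Because Rfc3161Schema, like RekordSchema above, is just `any`, a decoded entry's Spec arrives as an untyped map. One common way to get typed access, sketched here under the same import assumptions, is a JSON round-trip into the versioned struct; Rfc3161V001Schema is the type added a few hunks below in this same diff:

package main

import (
	"encoding/json"

	"github.com/sigstore/rekor/pkg/generated/models"
)

// decodeSpec re-types the opaque Spec field of an rfc3161 entry into its
// v0.0.1 schema (illustrative helper, not an API of this package).
func decodeSpec(entry *models.Rfc3161) (*models.Rfc3161V001Schema, error) {
	raw, err := json.Marshal(entry.Spec)
	if err != nil {
		return nil, err
	}
	var spec models.Rfc3161V001Schema
	if err := json.Unmarshal(raw, &spec); err != nil {
		return nil, err
	}
	return &spec, nil
}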
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go
new file mode 100644
index 00000000000..c2037cd7ade
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go
@@ -0,0 +1,192 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	stderrors "errors"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// Rfc3161V001Schema Timestamp v0.0.1 Schema
+//
+// # Schema for RFC3161 entries
+//
+// swagger:model rfc3161V001Schema
+type Rfc3161V001Schema struct {
+
+	// tsr
+	// Required: true
+	Tsr *Rfc3161V001SchemaTsr `json:"tsr"`
+}
+
+// Validate validates this rfc3161 v001 schema
+func (m *Rfc3161V001Schema) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateTsr(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *Rfc3161V001Schema) validateTsr(formats strfmt.Registry) error {
+
+	if err := validate.Required("tsr", "body", m.Tsr); err != nil {
+		return err
+	}
+
+	if m.Tsr != nil {
+		if err := m.Tsr.Validate(formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("tsr")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("tsr")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ContextValidate validate this rfc3161 v001 schema based on the context it is used
+func (m *Rfc3161V001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.contextValidateTsr(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *Rfc3161V001Schema) contextValidateTsr(ctx context.Context, formats strfmt.Registry) error {
+
+	if m.Tsr != nil {
+
+		if err := m.Tsr.ContextValidate(ctx, formats); err != nil {
+			ve := new(errors.Validation)
+			if stderrors.As(err, &ve) {
+				return ve.ValidateName("tsr")
+			}
+			ce := new(errors.CompositeError)
+			if stderrors.As(err, &ce) {
+				return ce.ValidateName("tsr")
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Rfc3161V001Schema) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Rfc3161V001Schema) UnmarshalBinary(b []byte) error {
+	var res Rfc3161V001Schema
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
+
+// Rfc3161V001SchemaTsr Information about the tsr file associated with the entry
+//
+// swagger:model Rfc3161V001SchemaTsr
+type Rfc3161V001SchemaTsr struct {
+
+	// Specifies the tsr file content inline within the document
+	// Required: true
+	// Format: byte
+	Content *strfmt.Base64 `json:"content"`
+}
+
+// Validate validates this rfc3161 v001 schema tsr
+func (m *Rfc3161V001SchemaTsr) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateContent(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *Rfc3161V001SchemaTsr) validateContent(formats strfmt.Registry) error {
+
+	if err := validate.Required("tsr"+"."+"content", "body", m.Content); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ContextValidate validates this rfc3161 v001 schema tsr based on context it is used
+func (m *Rfc3161V001SchemaTsr) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Rfc3161V001SchemaTsr) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Rfc3161V001SchemaTsr) UnmarshalBinary(b []byte) error {
+	var res Rfc3161V001SchemaTsr
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
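strfmt.Base64 fields such as Tsr.Content marshal to plain base64 strings on the wire, which is worth seeing once. A sketch with fabricated bytes, not a real TimeStampResp:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/strfmt"

	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	tsr := strfmt.Base64([]byte{0x30, 0x82, 0x01, 0x00}) // fabricated DER-ish bytes

	spec := models.Rfc3161V001Schema{
		Tsr: &models.Rfc3161V001SchemaTsr{Content: &tsr},
	}

	b, err := json.Marshal(spec)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // {"tsr":{"content":"MIIBAA=="}}
}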
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go
new file mode 100644
index 00000000000..8b1f10c77e6
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go
@@ -0,0 +1,210 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// Rpm RPM package
+//
+// swagger:model rpm
+type Rpm struct {
+
+	// api version
+	// Required: true
+	// Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+	APIVersion *string `json:"apiVersion"`
+
+	// spec
+	// Required: true
+	Spec RpmSchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *Rpm) Kind() string {
+	return "rpm"
+}
+
+// SetKind sets the kind of this subtype
+func (m *Rpm) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *Rpm) UnmarshalJSON(raw []byte) error {
+	var data struct {
+
+		// api version
+		// Required: true
+		// Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+		APIVersion *string `json:"apiVersion"`
+
+		// spec
+		// Required: true
+		Spec RpmSchema `json:"spec"`
+	}
+	buf := bytes.NewBuffer(raw)
+	dec := json.NewDecoder(buf)
+	dec.UseNumber()
+
+	if err := dec.Decode(&data); err != nil {
+		return err
+	}
+
+	var base struct {
+		/* Just the base type fields. Used for unmarshalling polymorphic types.*/
+
+		Kind string `json:"kind"`
+	}
+	buf = bytes.NewBuffer(raw)
+	dec = json.NewDecoder(buf)
+	dec.UseNumber()
+
+	if err := dec.Decode(&base); err != nil {
+		return err
+	}
+
+	var result Rpm
+
+	if base.Kind != result.Kind() {
+		/* Not the type we're looking for. */
+		return errors.New(422, "invalid kind value: %q", base.Kind)
+	}
+
+	result.APIVersion = data.APIVersion
+	result.Spec = data.Spec
+
+	*m = result
+
+	return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m Rpm) MarshalJSON() ([]byte, error) {
+	var b1, b2, b3 []byte
+	var err error
+	b1, err = json.Marshal(struct {
+
+		// api version
+		// Required: true
+		// Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+		APIVersion *string `json:"apiVersion"`
+
+		// spec
+		// Required: true
+		Spec RpmSchema `json:"spec"`
+	}{
+
+		APIVersion: m.APIVersion,
+
+		Spec: m.Spec,
+	})
+	if err != nil {
+		return nil, err
+	}
+	b2, err = json.Marshal(struct {
+		Kind string `json:"kind"`
+	}{
+
+		Kind: m.Kind(),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this rpm
+func (m *Rpm) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := m.validateAPIVersion(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if err := m.validateSpec(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (m *Rpm) validateAPIVersion(formats strfmt.Registry) error {
+
+	if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+		return err
+	}
+
+	if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *Rpm) validateSpec(formats strfmt.Registry) error {
+
+	if m.Spec == nil {
+		return errors.Required("spec", "body", nil)
+	}
+
+	return nil
+}
+
+// ContextValidate validate this rpm based on the context it is used
+func (m *Rpm) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Rpm) MarshalBinary() ([]byte, error) {
+	if m == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Rpm) UnmarshalBinary(b []byte) error {
+	var res Rpm
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*m = res
+	return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go
new file mode 100644
index 00000000000..2520dfb9c78
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// RpmSchema RPM Schema
+//
+// # Schema for RPM objects
+//
+// swagger:model rpmSchema
+type RpmSchema any
+ } + return nil +} + +func (m *RpmV001Schema) validatePackage(formats strfmt.Registry) error { + + if err := validate.Required("package", "body", m.Package); err != nil { + return err + } + + if m.Package != nil { + if err := m.Package.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package") + } + + return err + } + } + + return nil +} + +func (m *RpmV001Schema) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { + return err + } + + if m.PublicKey != nil { + if err := m.PublicKey.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this rpm v001 schema based on the context it is used +func (m *RpmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidatePackage(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePublicKey(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *RpmV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error { + + if m.Package != nil { + + if err := m.Package.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package") + } + + return err + } + } + + return nil +} + +func (m *RpmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { + + if m.PublicKey != nil { + + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *RpmV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RpmV001Schema) UnmarshalBinary(b []byte) error { + var res RpmV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// RpmV001SchemaPackage Information about the package associated with the entry +// +// swagger:model RpmV001SchemaPackage +type RpmV001SchemaPackage struct { + + // Specifies the package inline within the document + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` + + // hash + Hash *RpmV001SchemaPackageHash `json:"hash,omitempty"` + + // Values of the RPM headers + // Read Only: true + Headers map[string]string `json:"headers,omitempty"` +} + +// Validate validates this rpm v001 schema package +func (m *RpmV001SchemaPackage) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} + +func (m *RpmV001SchemaPackage) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if m.Hash != nil { + if err := m.Hash.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package" + "." + "hash") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this rpm v001 schema package based on the context it is used +func (m *RpmV001SchemaPackage) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateHeaders(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *RpmV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { + + if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := m.Hash.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("package" + "." + "hash") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("package" + "." + "hash") + } + + return err + } + } + + return nil +} + +func (m *RpmV001SchemaPackage) contextValidateHeaders(ctx context.Context, formats strfmt.Registry) error { + + return nil +} + +// MarshalBinary interface implementation +func (m *RpmV001SchemaPackage) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RpmV001SchemaPackage) UnmarshalBinary(b []byte) error { + var res RpmV001SchemaPackage + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// RpmV001SchemaPackageHash Specifies the hash algorithm and value for the package +// +// swagger:model RpmV001SchemaPackageHash +type RpmV001SchemaPackageHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: ["sha256"] + Algorithm *string `json:"algorithm"` + + // The hash value for the package + // Required: true + Value *string `json:"value"` +} + +// Validate validates this rpm v001 schema package hash +func (m *RpmV001SchemaPackageHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var rpmV001SchemaPackageHashTypeAlgorithmPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + rpmV001SchemaPackageHashTypeAlgorithmPropEnum = append(rpmV001SchemaPackageHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // RpmV001SchemaPackageHashAlgorithmSha256 captures enum value "sha256" + RpmV001SchemaPackageHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *RpmV001SchemaPackageHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, rpmV001SchemaPackageHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *RpmV001SchemaPackageHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("package"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("package"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *RpmV001SchemaPackageHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("package"+"."+"hash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this rpm v001 schema package hash based on context it is used +func (m *RpmV001SchemaPackageHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *RpmV001SchemaPackageHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RpmV001SchemaPackageHash) UnmarshalBinary(b []byte) error { + var res RpmV001SchemaPackageHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// RpmV001SchemaPublicKey The PGP public key that can verify the RPM signature +// +// swagger:model RpmV001SchemaPublicKey +type RpmV001SchemaPublicKey struct { + + // Specifies the content of the public key inline within the document + // Required: true + // Format: byte + Content *strfmt.Base64 `json:"content"` +} + +// Validate validates this rpm v001 schema public key +func (m *RpmV001SchemaPublicKey) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *RpmV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this rpm v001 schema public key based on context it is used +func (m *RpmV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *RpmV001SchemaPublicKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RpmV001SchemaPublicKey) UnmarshalBinary(b []byte) error { + var res RpmV001SchemaPublicKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go new file mode 100644 index 00000000000..a1b7d08001f --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go @@ -0,0 +1,350 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SearchIndex search index +// +// swagger:model SearchIndex +type SearchIndex struct { + + // email + // Format: email + Email strfmt.Email `json:"email,omitempty"` + + // hash + // Pattern: ^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$ + Hash string `json:"hash,omitempty"` + + // operator + // Enum: ["and","or"] + Operator string `json:"operator,omitempty"` + + // public key + PublicKey *SearchIndexPublicKey `json:"publicKey,omitempty"` +} + +// Validate validates this search index +func (m *SearchIndex) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEmail(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOperator(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *SearchIndex) validateEmail(formats strfmt.Registry) error { + if swag.IsZero(m.Email) { // not required + return nil + } + + if err := validate.FormatOf("email", "body", "email", m.Email.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *SearchIndex) validateHash(formats strfmt.Registry) error { + if swag.IsZero(m.Hash) { // not required + return nil + } + + if err := validate.Pattern("hash", "body", m.Hash, `^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$`); err != nil { + return err + } + + return nil +} + +var searchIndexTypeOperatorPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["and","or"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + searchIndexTypeOperatorPropEnum = append(searchIndexTypeOperatorPropEnum, v) + } +} + +const ( + + // SearchIndexOperatorAnd captures enum value "and" + SearchIndexOperatorAnd string = "and" + + // SearchIndexOperatorOr captures enum value "or" + SearchIndexOperatorOr string = "or" +) + +// prop value enum +func (m *SearchIndex) validateOperatorEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, searchIndexTypeOperatorPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *SearchIndex) validateOperator(formats strfmt.Registry) error { + if swag.IsZero(m.Operator) { // not required + return nil + } + + // value enum + if err := m.validateOperatorEnum("operator", "body", m.Operator); err != nil { + return err + } + + return nil +} + +func (m *SearchIndex) validatePublicKey(formats strfmt.Registry) error { + if swag.IsZero(m.PublicKey) { // not required + return nil + } + + if m.PublicKey != nil { + if err := m.PublicKey.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this search index based on the context it is used +func (m *SearchIndex) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidatePublicKey(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *SearchIndex) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { + + if m.PublicKey != nil { + + if swag.IsZero(m.PublicKey) { // not required + return nil + } + + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("publicKey") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("publicKey") + } + + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SearchIndex) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SearchIndex) UnmarshalBinary(b []byte) error { + var res SearchIndex + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// SearchIndexPublicKey search index public key +// +// swagger:model SearchIndexPublicKey +type SearchIndexPublicKey struct { + + // content + // Format: byte + Content strfmt.Base64 `json:"content,omitempty"` + + // format + // Required: true + // Enum: ["pgp","x509","minisign","ssh","tuf"] + Format *string `json:"format"` + + // url + // Format: uri + URL strfmt.URI `json:"url,omitempty"` +} + +// Validate validates this search index public key +func (m *SearchIndexPublicKey) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateFormat(formats); err != nil { + res = append(res, err) + } + + if err := m.validateURL(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var searchIndexPublicKeyTypeFormatPropEnum []any + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["pgp","x509","minisign","ssh","tuf"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + searchIndexPublicKeyTypeFormatPropEnum = append(searchIndexPublicKeyTypeFormatPropEnum, v) + } +} + +const ( + + // SearchIndexPublicKeyFormatPgp captures enum value "pgp" + SearchIndexPublicKeyFormatPgp string = "pgp" + + // SearchIndexPublicKeyFormatX509 captures enum value "x509" + SearchIndexPublicKeyFormatX509 string = "x509" + + // SearchIndexPublicKeyFormatMinisign captures enum value "minisign" + SearchIndexPublicKeyFormatMinisign string = "minisign" + + // SearchIndexPublicKeyFormatSSH captures enum value "ssh" + SearchIndexPublicKeyFormatSSH string = "ssh" + + // SearchIndexPublicKeyFormatTUF captures enum value "tuf" + SearchIndexPublicKeyFormatTUF string = "tuf" +) + +// prop value enum +func (m *SearchIndexPublicKey) validateFormatEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, searchIndexPublicKeyTypeFormatPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *SearchIndexPublicKey) validateFormat(formats strfmt.Registry) error { + + if err := validate.Required("publicKey"+"."+"format", "body", m.Format); err != nil { + return err + } + + // value enum + if err := m.validateFormatEnum("publicKey"+"."+"format", "body", *m.Format); err != nil { + return err + } + + return nil +} + +func (m *SearchIndexPublicKey) validateURL(formats strfmt.Registry) error { + if swag.IsZero(m.URL) { // not required + return nil + } + + if err := validate.FormatOf("publicKey"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate 
validates this search index public key based on context it is used +func (m *SearchIndexPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *SearchIndexPublicKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SearchIndexPublicKey) UnmarshalBinary(b []byte) error { + var res SearchIndexPublicKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go new file mode 100644 index 00000000000..6833c8f6d8f --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go @@ -0,0 +1,306 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + stderrors "errors" + "io" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SearchLogQuery search log query +// +// swagger:model SearchLogQuery +type SearchLogQuery struct { + entriesField []ProposedEntry + + // entry u UI ds + // Max Items: 10 + // Min Items: 1 + EntryUUIDs []string `json:"entryUUIDs"` + + // log indexes + // Max Items: 10 + // Min Items: 1 + LogIndexes []*int64 `json:"logIndexes"` +} + +// Entries gets the entries of this base type +func (m *SearchLogQuery) Entries() []ProposedEntry { + return m.entriesField +} + +// SetEntries sets the entries of this base type +func (m *SearchLogQuery) SetEntries(val []ProposedEntry) { + m.entriesField = val +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *SearchLogQuery) UnmarshalJSON(raw []byte) error { + var data struct { + Entries json.RawMessage `json:"entries"` + + EntryUUIDs []string `json:"entryUUIDs"` + + LogIndexes []*int64 `json:"logIndexes"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var propEntries []ProposedEntry + if string(data.Entries) != "null" { + entries, err := UnmarshalProposedEntrySlice(bytes.NewBuffer(data.Entries), runtime.JSONConsumer()) + if err != nil && !stderrors.Is(err, io.EOF) { + return err + } + propEntries = entries + } + + var result SearchLogQuery + + // entries + result.entriesField = propEntries + + // entryUUIDs + result.EntryUUIDs = data.EntryUUIDs + + // logIndexes + result.LogIndexes = data.LogIndexes + + *m = result + + return nil +} 
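The `UnmarshalJSON` above and the `MarshalJSON` below split the polymorphic `entries` slice away from the flat `entryUUIDs`/`logIndexes` fields and stitch the halves back together with `swag.ConcatJSON`. A minimal round-trip sketch (an editor's illustration, not part of the vendored diff; the printed output in the trailing comment is indicative only):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	idx := int64(42)
	q := models.SearchLogQuery{
		// 64 hex characters, matching the ^([0-9a-fA-F]{64}|[0-9a-fA-F]{80})$
		// pattern enforced by validateEntryUUIDs below.
		EntryUUIDs: []string{"24296fb24b8ad77aaf485c25e478e5a91f1ae183f6ab11f318e93a3de0cf6a36"},
		LogIndexes: []*int64{&idx},
	}

	// The min/max-items, pattern, and minimum checks defined below all
	// pass for this query.
	if err := q.Validate(strfmt.Default); err != nil {
		panic(err)
	}

	// json.Marshal dispatches to the value-receiver MarshalJSON, which
	// appends the polymorphic "entries" object via swag.ConcatJSON.
	b, err := json.Marshal(q)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// e.g. {"entryUUIDs":["2429..."],"logIndexes":[42],"entries":null}
}
```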
+ +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m SearchLogQuery) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + EntryUUIDs []string `json:"entryUUIDs"` + + LogIndexes []*int64 `json:"logIndexes"` + }{ + + EntryUUIDs: m.EntryUUIDs, + + LogIndexes: m.LogIndexes, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Entries []ProposedEntry `json:"entries"` + }{ + + Entries: m.entriesField, + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this search log query +func (m *SearchLogQuery) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEntries(formats); err != nil { + res = append(res, err) + } + + if err := m.validateEntryUUIDs(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLogIndexes(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SearchLogQuery) validateEntries(formats strfmt.Registry) error { + if swag.IsZero(m.Entries()) { // not required + return nil + } + + iEntriesSize := int64(len(m.Entries())) + + if err := validate.MinItems("entries", "body", iEntriesSize, 1); err != nil { + return err + } + + if err := validate.MaxItems("entries", "body", iEntriesSize, 10); err != nil { + return err + } + + for i := 0; i < len(m.Entries()); i++ { + + if err := m.entriesField[i].Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("entries" + "." + strconv.Itoa(i)) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("entries" + "." 
+ strconv.Itoa(i)) + } + + return err + } + + } + + return nil +} + +func (m *SearchLogQuery) validateEntryUUIDs(formats strfmt.Registry) error { + if swag.IsZero(m.EntryUUIDs) { // not required + return nil + } + + iEntryUUIDsSize := int64(len(m.EntryUUIDs)) + + if err := validate.MinItems("entryUUIDs", "body", iEntryUUIDsSize, 1); err != nil { + return err + } + + if err := validate.MaxItems("entryUUIDs", "body", iEntryUUIDsSize, 10); err != nil { + return err + } + + for i := 0; i < len(m.EntryUUIDs); i++ { + + if err := validate.Pattern("entryUUIDs"+"."+strconv.Itoa(i), "body", m.EntryUUIDs[i], `^([0-9a-fA-F]{64}|[0-9a-fA-F]{80})$`); err != nil { + return err + } + + } + + return nil +} + +func (m *SearchLogQuery) validateLogIndexes(formats strfmt.Registry) error { + if swag.IsZero(m.LogIndexes) { // not required + return nil + } + + iLogIndexesSize := int64(len(m.LogIndexes)) + + if err := validate.MinItems("logIndexes", "body", iLogIndexesSize, 1); err != nil { + return err + } + + if err := validate.MaxItems("logIndexes", "body", iLogIndexesSize, 10); err != nil { + return err + } + + for i := 0; i < len(m.LogIndexes); i++ { + if swag.IsZero(m.LogIndexes[i]) { // not required + continue + } + + if err := validate.MinimumInt("logIndexes"+"."+strconv.Itoa(i), "body", *m.LogIndexes[i], 0, false); err != nil { + return err + } + + } + + return nil +} + +// ContextValidate validate this search log query based on the context it is used +func (m *SearchLogQuery) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateEntries(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SearchLogQuery) contextValidateEntries(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Entries()); i++ { + + if swag.IsZero(m.entriesField[i]) { // not required + return nil + } + + if err := m.entriesField[i].ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("entries" + "." + strconv.Itoa(i)) + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("entries" + "." + strconv.Itoa(i)) + } + + return err + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SearchLogQuery) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SearchLogQuery) UnmarshalBinary(b []byte) error { + var res SearchLogQuery + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go new file mode 100644 index 00000000000..a5f6eff0f7a --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// TUF TUF metadata +// +// swagger:model tuf +type TUF struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec TUFSchema `json:"spec"` +} + +// Kind gets the kind of this subtype +func (m *TUF) Kind() string { + return "tuf" +} + +// SetKind sets the kind of this subtype +func (m *TUF) SetKind(val string) { +} + +// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure +func (m *TUF) UnmarshalJSON(raw []byte) error { + var data struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec TUFSchema `json:"spec"` + } + buf := bytes.NewBuffer(raw) + dec := json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&data); err != nil { + return err + } + + var base struct { + /* Just the base type fields. Used for unmashalling polymorphic types.*/ + + Kind string `json:"kind"` + } + buf = bytes.NewBuffer(raw) + dec = json.NewDecoder(buf) + dec.UseNumber() + + if err := dec.Decode(&base); err != nil { + return err + } + + var result TUF + + if base.Kind != result.Kind() { + /* Not the type we're looking for. 
*/ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m TUF) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec TUFSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this tuf +func (m *TUF) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *TUF) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *TUF) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this tuf based on the context it is used +func (m *TUF) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *TUF) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TUF) UnmarshalBinary(b []byte) error { + var res TUF + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go new file mode 100644 index 00000000000..7c944ef92da --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// TUFSchema TUF Schema +// +// # Schema for TUF metadata objects +// +// swagger:model tufSchema +type TUFSchema any diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go new file mode 100644 index 00000000000..69b5e93d614 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go @@ -0,0 +1,321 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + stderrors "errors" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// TUFV001Schema TUF v0.0.1 Schema +// +// # Schema for TUF metadata entries +// +// swagger:model tufV001Schema +type TUFV001Schema struct { + + // metadata + // Required: true + Metadata *TUFV001SchemaMetadata `json:"metadata"` + + // root + // Required: true + Root *TUFV001SchemaRoot `json:"root"` + + // TUF specification version + // Read Only: true + SpecVersion string `json:"spec_version,omitempty"` +} + +// Validate validates this tuf v001 schema +func (m *TUFV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMetadata(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRoot(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *TUFV001Schema) validateMetadata(formats strfmt.Registry) error { + + if err := validate.Required("metadata", "body", m.Metadata); err != nil { + return err + } + + if m.Metadata != nil { + if err := m.Metadata.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("metadata") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("metadata") + } + + return err + } + } + + return nil +} + +func (m *TUFV001Schema) validateRoot(formats strfmt.Registry) error { + + if err := validate.Required("root", "body", m.Root); err != nil { + return err + } + + if m.Root != nil { + if err := m.Root.Validate(formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("root") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("root") + } + + return err + } + } + + return nil +} + +// ContextValidate validate this tuf v001 schema based on the context it is used +func (m *TUFV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateMetadata(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateRoot(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateSpecVersion(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *TUFV001Schema) contextValidateMetadata(ctx context.Context, formats strfmt.Registry) error { + + if m.Metadata != nil { + + if err := m.Metadata.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("metadata") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("metadata") + } + + return err + } + } + + return nil +} + +func (m *TUFV001Schema) contextValidateRoot(ctx context.Context, formats strfmt.Registry) error { + + if m.Root != nil { + + if err := m.Root.ContextValidate(ctx, formats); err != nil { + ve := new(errors.Validation) + if stderrors.As(err, &ve) { + return ve.ValidateName("root") + } + ce := new(errors.CompositeError) + if stderrors.As(err, &ce) { + return ce.ValidateName("root") + } + + return err + } + } + + return nil +} + +func (m *TUFV001Schema) contextValidateSpecVersion(ctx context.Context, formats strfmt.Registry) error { + + if err := validate.ReadOnly(ctx, "spec_version", "body", m.SpecVersion); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *TUFV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TUFV001Schema) UnmarshalBinary(b []byte) error { + var res TUFV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// TUFV001SchemaMetadata TUF metadata +// +// swagger:model TUFV001SchemaMetadata +type TUFV001SchemaMetadata struct { + + // Specifies the metadata inline within the document + // Required: true + Content any `json:"content"` +} + +// Validate validates this TUF v001 schema metadata +func (m *TUFV001SchemaMetadata) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + 
if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *TUFV001SchemaMetadata) validateContent(formats strfmt.Registry) error { + + if m.Content == nil { + return errors.Required("metadata"+"."+"content", "body", nil) + } + + return nil +} + +// ContextValidate validates this TUF v001 schema metadata based on context it is used +func (m *TUFV001SchemaMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *TUFV001SchemaMetadata) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TUFV001SchemaMetadata) UnmarshalBinary(b []byte) error { + var res TUFV001SchemaMetadata + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// TUFV001SchemaRoot root metadata containing about the public keys used to sign the manifest +// +// swagger:model TUFV001SchemaRoot +type TUFV001SchemaRoot struct { + + // Specifies the metadata inline within the document + // Required: true + Content any `json:"content"` +} + +// Validate validates this TUF v001 schema root +func (m *TUFV001SchemaRoot) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateContent(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *TUFV001SchemaRoot) validateContent(formats strfmt.Registry) error { + + if m.Content == nil { + return errors.Required("root"+"."+"content", "body", nil) + } + + return nil +} + +// ContextValidate validates this TUF v001 schema root based on context it is used +func (m *TUFV001SchemaRoot) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *TUFV001SchemaRoot) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TUFV001SchemaRoot) UnmarshalBinary(b []byte) error { + var res TUFV001SchemaRoot + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/internal/log/logger.go b/vendor/github.com/sigstore/rekor/pkg/internal/log/logger.go new file mode 100644 index 00000000000..3d2a2ed0b95 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/internal/log/logger.go @@ -0,0 +1,36 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package log + +type LoggerImpl interface { + Error(...any) + Errorf(string, ...any) + Panic(...any) + Info(...any) + Infof(string, ...any) +} + +var Logger LoggerImpl = &discardLogger{} + +type discardLogger struct{} + +func (l *discardLogger) Panic(err ...any) { + panic(err) +} +func (l *discardLogger) Error(...any) {} +func (l *discardLogger) Errorf(string, ...any) {} +func (l *discardLogger) Infof(string, ...any) {} +func (l *discardLogger) Info(...any) {} diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/identity/identity.go b/vendor/github.com/sigstore/rekor/pkg/pki/identity/identity.go new file mode 100644 index 00000000000..4566e1ddfe1 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/pki/identity/identity.go @@ -0,0 +1,38 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package identity + +type Identity struct { + // Types include: + // - *rsa.PublicKey + // - *ecdsa.PublicKey + // - ed25519.PublicKey + // - *x509.Certificate + // - openpgp.EntityList (golang.org/x/crypto/openpgp) + // - *minisign.PublicKey (github.com/jedisct1/go-minisign) + // - ssh.PublicKey (golang.org/x/crypto/ssh) + Crypto any + // Raw key or certificate extracted from Crypto. Values include: + // - PKIX ASN.1 DER-encoded public key + // - ASN.1 DER-encoded certificate + Raw []byte + // For keys, certificates, and minisign, hex-encoded SHA-256 digest + // of the public key or certificate + // For SSH and PGP, Fingerprint is the standard for each ecosystem + // For SSH, unpadded base-64 encoded SHA-256 digest of the key + // For PGP, hex-encoded SHA-1 digest of a key, which can be either + // a primary key or subkey + Fingerprint string +} diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/pkitypes/types.go b/vendor/github.com/sigstore/rekor/pkg/pki/pkitypes/types.go new file mode 100644 index 00000000000..19688bd1bdb --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/pki/pkitypes/types.go @@ -0,0 +1,40 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pki + +import ( + "io" + + "github.com/sigstore/rekor/pkg/pki/identity" + sigsig "github.com/sigstore/sigstore/pkg/signature" +) + +// PublicKey Generic object representing a public key (regardless of format & algorithm) +type PublicKey interface { + CanonicalValue() ([]byte, error) + // Deprecated: EmailAddresses() will be deprecated in favor of Subjects() which will + // also return Subject URIs present in public keys. 
+ EmailAddresses() []string + Subjects() []string + // Identities returns a list of typed keys and certificates. + Identities() ([]identity.Identity, error) +} + +// Signature Generic object representing a signature (regardless of format & algorithm) +type Signature interface { + CanonicalValue() ([]byte, error) + Verify(r io.Reader, k interface{}, opts ...sigsig.VerifyOption) error +} diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/x509/x509.go b/vendor/github.com/sigstore/rekor/pkg/pki/x509/x509.go new file mode 100644 index 00000000000..2cfaf816009 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/pki/x509/x509.go @@ -0,0 +1,276 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package x509 + +import ( + "bytes" + "crypto" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "strings" + + "github.com/asaskevich/govalidator" + "github.com/sigstore/rekor/pkg/pki/identity" + "github.com/sigstore/sigstore/pkg/cryptoutils" + sigsig "github.com/sigstore/sigstore/pkg/signature" +) + +// EmailAddressOID defined by https://oidref.com/1.2.840.113549.1.9.1 +var EmailAddressOID asn1.ObjectIdentifier = []int{1, 2, 840, 113549, 1, 9, 1} + +type Signature struct { + signature []byte + verifierLoadOpts []sigsig.LoadOption +} + +// NewSignature creates and validates an x509 signature object +func NewSignature(r io.Reader) (*Signature, error) { + return NewSignatureWithOpts(r) +} + +func NewSignatureWithOpts(r io.Reader, opts ...sigsig.LoadOption) (*Signature, error) { + b, err := io.ReadAll(r) + if err != nil { + return nil, err + } + return &Signature{ + signature: b, + verifierLoadOpts: opts, + }, nil +} + +// CanonicalValue implements the pki.Signature interface +func (s Signature) CanonicalValue() ([]byte, error) { + return s.signature, nil +} + +// Verify implements the pki.Signature interface +func (s Signature) Verify(r io.Reader, k interface{}, opts ...sigsig.VerifyOption) error { + if len(s.signature) == 0 { + //lint:ignore ST1005 X509 is proper use of term + return errors.New("X509 signature has not been initialized") + } + + key, ok := k.(*PublicKey) + if !ok { + return fmt.Errorf("invalid public key type for: %v", k) + } + + p := key.key + if p == nil { + switch { + case key.cert != nil: + p = key.cert.c.PublicKey + case len(key.certs) > 0: + if err := verifyCertChain(key.certs); err != nil { + return err + } + p = key.certs[0].PublicKey + default: + return errors.New("no public key found") + } + } + + verifier, err := sigsig.LoadVerifierWithOpts(p, s.verifierLoadOpts...) + if err != nil { + return err + } + return verifier.VerifySignature(bytes.NewReader(s.signature), r, opts...) 
+} + +// PublicKey Public Key that follows the x509 standard +type PublicKey struct { + key interface{} + cert *cert + certs []*x509.Certificate +} + +type cert struct { + c *x509.Certificate + b []byte +} + +// NewPublicKey implements the pki.PublicKey interface +func NewPublicKey(r io.Reader) (*PublicKey, error) { + rawPub, err := io.ReadAll(r) + if err != nil { + return nil, err + } + trimmedRawPub := bytes.TrimSpace(rawPub) + + block, rest := pem.Decode(trimmedRawPub) + if block == nil { + return nil, errors.New("invalid public key: failure decoding PEM") + } + + // Handle certificate chain, concatenated PEM-encoded certificates + if len(rest) > 0 { + // Support up to 10 certificates in a chain, to avoid parsing extremely long chains + certs, err := cryptoutils.UnmarshalCertificatesFromPEMLimited(trimmedRawPub, 10) + if err != nil { + return nil, err + } + return &PublicKey{certs: certs}, nil + } + + switch block.Type { + case string(cryptoutils.PublicKeyPEMType): + key, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return nil, err + } + return &PublicKey{key: key}, nil + case string(cryptoutils.CertificatePEMType): + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + return &PublicKey{ + cert: &cert{ + c: c, + b: block.Bytes, + }}, nil + } + return nil, fmt.Errorf("invalid public key: cannot handle type %v", block.Type) +} + +// CanonicalValue implements the pki.PublicKey interface +func (k PublicKey) CanonicalValue() (encoded []byte, err error) { + + switch { + case k.key != nil: + encoded, err = cryptoutils.MarshalPublicKeyToPEM(k.key) + case k.cert != nil: + encoded, err = cryptoutils.MarshalCertificateToPEM(k.cert.c) + case k.certs != nil: + encoded, err = cryptoutils.MarshalCertificatesToPEM(k.certs) + default: + err = errors.New("x509 public key has not been initialized") + } + + return +} + +func (k PublicKey) CryptoPubKey() crypto.PublicKey { + if k.cert != nil { + return k.cert.c.PublicKey + } + if len(k.certs) > 0 { + return k.certs[0].PublicKey + } + return k.key +} + +// EmailAddresses implements the pki.PublicKey interface +func (k PublicKey) EmailAddresses() []string { + var names []string + var cert *x509.Certificate + if k.cert != nil { + cert = k.cert.c + } else if len(k.certs) > 0 { + cert = k.certs[0] + } + if cert != nil { + for _, name := range cert.EmailAddresses { + if govalidator.IsEmail(name) { + names = append(names, strings.ToLower(name)) + } + } + } + return names +} + +// Subjects implements the pki.PublicKey interface +func (k PublicKey) Subjects() []string { + var subjects []string + var cert *x509.Certificate + if k.cert != nil { + cert = k.cert.c + } else if len(k.certs) > 0 { + cert = k.certs[0] + } + if cert != nil { + subjects = cryptoutils.GetSubjectAlternateNames(cert) + } + return subjects +} + +// Identities implements the pki.PublicKey interface +func (k PublicKey) Identities() ([]identity.Identity, error) { + // k contains either a key, a cert, or a list of certs + if k.key != nil { + pkixKey, err := cryptoutils.MarshalPublicKeyToDER(k.key) + if err != nil { + return nil, err + } + digest := sha256.Sum256(pkixKey) + return []identity.Identity{{ + Crypto: k.key, + Raw: pkixKey, + Fingerprint: hex.EncodeToString(digest[:]), + }}, nil + } + + var cert *x509.Certificate + switch { + case k.cert != nil: + cert = k.cert.c + case len(k.certs) > 0: + cert = k.certs[0] + default: + return nil, errors.New("no key, certificate or certificate chain provided") + } + + digest := sha256.Sum256(cert.Raw) + 
return []identity.Identity{{ + Crypto: cert, + Raw: cert.Raw, + Fingerprint: hex.EncodeToString(digest[:]), + }}, nil +} + +func verifyCertChain(certChain []*x509.Certificate) error { + if len(certChain) == 0 { + return errors.New("no certificate chain provided") + } + // No certificate chain to verify + if len(certChain) == 1 { + return nil + } + rootPool := x509.NewCertPool() + rootPool.AddCert(certChain[len(certChain)-1]) + subPool := x509.NewCertPool() + for _, c := range certChain[1 : len(certChain)-1] { + subPool.AddCert(c) + } + if _, err := certChain[0].Verify(x509.VerifyOptions{ + Roots: rootPool, + Intermediates: subPool, + // Allow any key usage + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + // Expired certificates can be uploaded and should be verifiable + CurrentTime: certChain[0].NotBefore, + }); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/README.md b/vendor/github.com/sigstore/rekor/pkg/types/README.md new file mode 100644 index 00000000000..76cb36e61ad --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/README.md @@ -0,0 +1,32 @@ +# Pluggable Types + +## Description + +Rekor supports pluggable types (aka different schemas) for entries stored in the transparency log. + +### Currently supported types + +- Alpine Packages [schema](alpine/alpine_schema.json) + - Versions: 0.0.1 +- COSE Envelopes [schema](cose/cose_schema.json) + - Versions: 0.0.1 +- DSSE Envelopes [schema](dsse/dsse_schema.json) + - Versions: 0.0.1 +- HashedRekord [schema](hashedrekord/hashedrekord_schema.json) + - Versions: 0.0.1 +- Helm Provenance Files [schema](helm/helm_schema.json) + - Versions: 0.0.1 +- In-Toto Attestations [schema](intoto/intoto_schema.json) + - Versions: 0.0.1, 0.0.2 +- Java Archives (JAR Files) [schema](jar/jar_schema.json) + - Versions: 0.0.1 +- Rekord *(default type)* [schema](rekord/rekord_schema.json) + - Versions: 0.0.1 +- RFC3161 Timestamps [schema](rfc3161/rfc3161_schema.json) + - Versions: 0.0.1 +- RPM Packages [schema](rpm/rpm_schema.json) + - Versions: 0.0.1 +- TUF Metadata [schema](tuf/tuf_schema.json) + - Versions: 0.0.1 + +Refer to [Rekor docs](https://docs.sigstore.dev/rekor/pluggable-types) for adding support for new types. diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/README.md b/vendor/github.com/sigstore/rekor/pkg/types/dsse/README.md new file mode 100644 index 00000000000..3244c98ec9a --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/README.md @@ -0,0 +1,25 @@ +**DSSE Type Data Documentation** + +This document provides a definition for each field that is not otherwise described in the [dsse +schema](https://github.com/sigstore/rekor/blob/main/pkg/types/dsse/v0.0.1/dsse_v0_0_1_schema.json). This +document also notes any additional information about the values +associated with each field such as the format in which the data is +stored and any necessary transformations. + +**How do you identify an object as an DSSE object?** + +The "Body" field will include an "dsseObj" field. + +**Recognized content types** + +- [in-toto + statements](https://github.com/in-toto/attestation/tree/main/spec#statement) + are recognized and parsed. The found subject hashes are indexed so + they can be searched for. 
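Because those subject hashes land in the search index, a client can later look entries up by digest using the `SearchIndex` model vendored earlier in this diff. A minimal sketch under that assumption; the digest below is computed locally purely to satisfy the model's hash pattern:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	// Stand-in for an in-toto subject digest; any sha256 hex digest
	// matches the SearchIndex hash pattern (sha1/sha256/sha512, with an
	// optional algorithm prefix).
	sum := sha256.Sum256([]byte("example artifact"))

	q := &models.SearchIndex{
		Hash:     "sha256:" + hex.EncodeToString(sum[:]),
		Operator: models.SearchIndexOperatorOr, // enum: "and" | "or"
	}
	if err := q.Validate(strfmt.Default); err != nil {
		panic(err)
	}
	fmt.Println("query hash:", q.Hash)
}
```

The populated model is what a caller would hand to Rekor's search endpoint; the HTTP wiring itself is outside the scope of this diff.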
+ +**What data about the envelope is stored in Rekor** + +Only the hash of the payload (the content covered by the digital signature inside the envelope), the hash of the entire DSSE envelope (including signatures), +the signature(s) and their corresponding verifying materials (e.g. public key(s) or certificates) are stored. + +Even if Rekor is configured to use attestation storage, the entire DSSE envelope will not be stored. diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse.go b/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse.go new file mode 100644 index 00000000000..9036fe5625c --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse.go @@ -0,0 +1,74 @@ +// +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dsse + +import ( + "context" + "errors" + "fmt" + + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/types" +) + +const ( + KIND = "dsse" +) + +type BaseDSSEType struct { + types.RekorType +} + +func init() { + types.TypeMap.Store(KIND, New) +} + +func New() types.TypeImpl { + bit := BaseDSSEType{} + bit.Kind = KIND + bit.VersionMap = VersionMap + return &bit +} + +var VersionMap = types.NewSemVerEntryFactoryMap() + +func (it BaseDSSEType) UnmarshalEntry(pe models.ProposedEntry) (types.EntryImpl, error) { + if pe == nil { + return nil, errors.New("proposed entry cannot be nil") + } + + in, ok := pe.(*models.DSSE) + if !ok { + return nil, errors.New("cannot unmarshal non-DSSE types") + } + + return it.VersionedUnmarshal(in, *in.APIVersion) +} + +func (it *BaseDSSEType) CreateProposedEntry(ctx context.Context, version string, props types.ArtifactProperties) (models.ProposedEntry, error) { + if version == "" { + version = it.DefaultVersion() + } + ei, err := it.VersionedUnmarshal(nil, version) + if err != nil { + return nil, fmt.Errorf("fetching DSSE version implementation: %w", err) + } + return ei.CreateFromArtifactProperties(ctx, props) +} + +func (it BaseDSSEType) DefaultVersion() string { + return "0.0.1" +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse_schema.json new file mode 100644 index 00000000000..7dc710fe215 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse_schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://rekor.sigstore.dev/types/dsse/dsse_schema.json", + "title": "DSSE Schema", + "description": "log entry schema for dsse envelopes", + "type": "object", + "oneOf": [ + { + "$ref": "v0.0.1/dsse_v0_0_1_schema.json" + } + ] +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/dsse_v0_0_1_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/dsse_v0_0_1_schema.json new file mode 100644 index 00000000000..51ebe3990af --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/dsse_v0_0_1_schema.json @@ -0,0 +1,96 @@ +{ + "$schema": 
"http://json-schema.org/draft-07/schema#", + "$id": "http://rekor.sigstore.dev/types/dsse/dsse_v0_0_1_schema.json", + "title": "DSSE v0.0.1 Schema", + "description": "Schema for DSSE envelopes", + "type": "object", + "properties": { + "proposedContent": { + "type": "object", + "properties": { + "envelope": { + "description": "DSSE envelope specified as a stringified JSON object", + "type": "string", + "writeOnly": true + }, + "verifiers": { + "description": "collection of all verification material (e.g. public keys or certificates) used to verify signatures over envelope's payload, specified as base64-encoded strings", + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "format": "byte" + }, + "writeOnly": true + } + }, + "writeOnly": true, + "required": [ "envelope", "verifiers" ] + }, + "signatures": { + "description": "extracted collection of all signatures of the envelope's payload; elements will be sorted by lexicographical order of the base64 encoded signature strings", + "type": "array", + "minItems": 1, + "items": { + "description": "a signature of the envelope's payload along with the verification material for the signature", + "type": "object", + "properties": { + "signature": { + "description": "base64 encoded signature of the payload", + "type": "string", + "pattern": "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + }, + "verifier": { + "description": "verification material that was used to verify the corresponding signature, specified as a base64 encoded string", + "type": "string", + "format": "byte" + } + }, + "required": [ "signature", "verifier" ] + }, + "readOnly": true + }, + "envelopeHash": { + "description": "Specifies the hash algorithm and value encompassing the entire envelope sent to Rekor", + "type": "object", + "properties": { + "algorithm": { + "description": "The hashing function used to compute the hash value", + "type": "string", + "enum": [ "sha256" ] + }, + "value": { + "description": "The value of the computed digest over the entire envelope", + "type": "string" + } + }, + "required": [ "algorithm", "value" ], + "readOnly": true + }, + "payloadHash": { + "description": "Specifies the hash algorithm and value covering the payload within the DSSE envelope", + "type": "object", + "properties": { + "algorithm": { + "description": "The hashing function used to compute the hash value", + "type": "string", + "enum": [ "sha256" ] + }, + "value": { + "description": "The value of the computed digest over the payload within the envelope", + "type": "string" + } + }, + "required": [ "algorithm", "value" ], + "readOnly": true + } + }, + "oneOf": [ + { + "required": [ "proposedContent" ] + }, + { + "required": [ "signatures", "envelopeHash", "payloadHash" ] + } + ] +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/entry.go b/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/entry.go new file mode 100644 index 00000000000..75fac99bdf3 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/entry.go @@ -0,0 +1,544 @@ +// +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dsse + +import ( + "bytes" + "context" + "crypto" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/conv" + "github.com/in-toto/in-toto-golang/in_toto" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/internal/log" + pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes" + "github.com/sigstore/rekor/pkg/pki/x509" + "github.com/sigstore/rekor/pkg/types" + dsseType "github.com/sigstore/rekor/pkg/types/dsse" + "github.com/sigstore/sigstore/pkg/signature" + sigdsse "github.com/sigstore/sigstore/pkg/signature/dsse" +) + +const ( + APIVERSION = "0.0.1" +) + +func init() { + if err := dsseType.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil { + log.Logger.Panic(err) + } +} + +type V001Entry struct { + DSSEObj models.DSSEV001Schema + env *dsse.Envelope +} + +func (v V001Entry) APIVersion() string { + return APIVERSION +} + +func NewEntry() types.EntryImpl { + return &V001Entry{} +} + +// IndexKeys computes the list of keys that should map back to this entry. +// It should *never* reference v.DSSEObj.ProposedContent as those values would only +// be present at the time of insertion +func (v V001Entry) IndexKeys() ([]string, error) { + var result []string + + for _, sig := range v.DSSEObj.Signatures { + if sig == nil || sig.Verifier == nil { + return result, errors.New("missing or malformed public key") + } + keyObj, err := x509.NewPublicKey(bytes.NewReader(*sig.Verifier)) + if err != nil { + return result, err + } + + canonKey, err := keyObj.CanonicalValue() + if err != nil { + return result, fmt.Errorf("could not canonicalize key: %w", err) + } + + keyHash := sha256.Sum256(canonKey) + result = append(result, "sha256:"+hex.EncodeToString(keyHash[:])) + + result = append(result, keyObj.Subjects()...) 
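+		// Each verifier therefore contributes two kinds of index keys:
+		// the SHA-256 digest of its canonicalized bytes, and any subject
+		// alternative names carried by a certificate verifier.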
+ } + + if v.DSSEObj.PayloadHash != nil { + payloadHashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.DSSEObj.PayloadHash.Algorithm, *v.DSSEObj.PayloadHash.Value)) + result = append(result, payloadHashKey) + } + + if v.DSSEObj.EnvelopeHash != nil { + envelopeHashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.DSSEObj.EnvelopeHash.Algorithm, *v.DSSEObj.EnvelopeHash.Value)) + result = append(result, envelopeHashKey) + } + + if v.env == nil { + log.Logger.Info("DSSEObj content or DSSE envelope is nil, returning partial set of keys") + return result, nil + } + + switch v.env.PayloadType { + case in_toto.PayloadType: + + if v.env.Payload == "" { + log.Logger.Info("DSSEObj DSSE payload is empty") + return result, nil + } + decodedPayload, err := v.env.DecodeB64Payload() + if err != nil { + return result, fmt.Errorf("could not decode envelope payload: %w", err) + } + statement, err := parseStatement(decodedPayload) + if err != nil { + return result, err + } + for _, s := range statement.Subject { + for alg, ds := range s.Digest { + result = append(result, alg+":"+ds) + } + } + // Not all in-toto statements will contain a SLSA provenance predicate. + // See https://github.com/in-toto/attestation/blob/main/spec/README.md#predicate + // for other predicates. + if predicate, err := parseSlsaPredicate(decodedPayload); err == nil { + if predicate.Predicate.Materials != nil { + for _, s := range predicate.Predicate.Materials { + for alg, ds := range s.Digest { + result = append(result, alg+":"+ds) + } + } + } + } + default: + log.Logger.Infof("Unknown DSSE envelope payloadType: %s", v.env.PayloadType) + } + return result, nil +} + +func parseStatement(p []byte) (*in_toto.Statement, error) { + ps := in_toto.Statement{} + if err := json.Unmarshal(p, &ps); err != nil { + return nil, err + } + return &ps, nil +} + +func parseSlsaPredicate(p []byte) (*in_toto.ProvenanceStatement, error) { + predicate := in_toto.ProvenanceStatement{} + if err := json.Unmarshal(p, &predicate); err != nil { + return nil, err + } + return &predicate, nil +} + +// DecodeEntry performs direct decode into the provided output pointer +// without mutating the receiver on error. 
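+//
+// Illustrative generic-JSON input for the proposedContent path (the shape
+// follows the v0.0.1 schema; shown here only as an example):
+//
+//	map[string]any{
+//		"proposedContent": map[string]any{
+//			"envelope":  "<stringified DSSE envelope JSON>",
+//			"verifiers": []any{"<base64-encoded verifier>"},
+//		},
+//	}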
+func DecodeEntry(input any, output *models.DSSEV001Schema) error { + if output == nil { + return fmt.Errorf("nil output *models.DSSEV001Schema") + } + var m models.DSSEV001Schema + // Single switch with map fast path + switch data := input.(type) { + case map[string]any: + mm := data + if pcRaw, ok := mm["proposedContent"].(map[string]any); ok { + m.ProposedContent = &models.DSSEV001SchemaProposedContent{} + if env, ok := pcRaw["envelope"].(string); ok { + m.ProposedContent.Envelope = &env + } + if vsIF, ok := pcRaw["verifiers"].([]any); ok { + m.ProposedContent.Verifiers = make([]strfmt.Base64, 0, len(vsIF)) + for _, it := range vsIF { + if s, ok := it.(string); ok && s != "" { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(outb, []byte(s)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for verifier: %w", err) + } + m.ProposedContent.Verifiers = append(m.ProposedContent.Verifiers, strfmt.Base64(outb[:n])) + } + } + } else if vsStr, ok := pcRaw["verifiers"].([]string); ok { + m.ProposedContent.Verifiers = make([]strfmt.Base64, 0, len(vsStr)) + for _, s := range vsStr { + if s == "" { + continue + } + outb := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(outb, []byte(s)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for verifier: %w", err) + } + m.ProposedContent.Verifiers = append(m.ProposedContent.Verifiers, strfmt.Base64(outb[:n])) + } + } + } + if sigs, ok := mm["signatures"].([]any); ok { + m.Signatures = make([]*models.DSSEV001SchemaSignaturesItems0, 0, len(sigs)) + for _, s := range sigs { + if sm, ok := s.(map[string]any); ok { + item := &models.DSSEV001SchemaSignaturesItems0{} + if sig, ok := sm["signature"].(string); ok { + item.Signature = &sig + } + if vr, ok := sm["verifier"].(string); ok && vr != "" { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(vr))) + n, err := base64.StdEncoding.Decode(outb, []byte(vr)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for signature verifier: %w", err) + } + b := strfmt.Base64(outb[:n]) + item.Verifier = &b + } + m.Signatures = append(m.Signatures, item) + } + } + } + if eh, ok := mm["envelopeHash"].(map[string]any); ok { + m.EnvelopeHash = &models.DSSEV001SchemaEnvelopeHash{} + if alg, ok := eh["algorithm"].(string); ok { + m.EnvelopeHash.Algorithm = &alg + } + if val, ok := eh["value"].(string); ok { + m.EnvelopeHash.Value = &val + } + } + if ph, ok := mm["payloadHash"].(map[string]any); ok { + m.PayloadHash = &models.DSSEV001SchemaPayloadHash{} + if alg, ok := ph["algorithm"].(string); ok { + m.PayloadHash.Algorithm = &alg + } + if val, ok := ph["value"].(string); ok { + m.PayloadHash.Value = &val + } + } + *output = m + return nil + case *models.DSSEV001Schema: + if data == nil { + return fmt.Errorf("nil *models.DSSEV001Schema") + } + *output = *data + return nil + case models.DSSEV001Schema: + *output = data + return nil + default: + return fmt.Errorf("unsupported input type %T for DecodeEntry", input) + } +} + +func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error { + it, ok := pe.(*models.DSSE) + if !ok { + return errors.New("cannot unmarshal non DSSE v0.0.1 type") + } + + dsseObj := &models.DSSEV001Schema{} + + if err := DecodeEntry(it.Spec, dsseObj); err != nil { + return err + } + + // field validation + if err := dsseObj.Validate(strfmt.Default); err != nil { + return err + } + + // either we have just proposed content or the canonicalized fields + if 
dsseObj.ProposedContent == nil { + // then we need canonicalized fields, and all must be present (if present, they would have been validated in the above call to Validate()) + if dsseObj.EnvelopeHash == nil || dsseObj.PayloadHash == nil || len(dsseObj.Signatures) == 0 { + return errors.New("either proposedContent or envelopeHash, payloadHash, and signatures must be present") + } + v.DSSEObj = *dsseObj + return nil + } + // if we're here, then we're trying to propose a new entry, so we check to ensure clients aren't setting server-side computed fields + if dsseObj.EnvelopeHash != nil || dsseObj.PayloadHash != nil || len(dsseObj.Signatures) != 0 { + return errors.New("either proposedContent or envelopeHash, payloadHash, and signatures must be present but not both") + } + + env := &dsse.Envelope{} + if err := json.Unmarshal([]byte(*dsseObj.ProposedContent.Envelope), env); err != nil { + return err + } + + if len(env.Signatures) == 0 { + return errors.New("DSSE envelope must contain 1 or more signatures") + } + + allPubKeyBytes := make([][]byte, 0) + for _, publicKey := range dsseObj.ProposedContent.Verifiers { + if publicKey == nil { + return errors.New("an invalid null verifier was provided in ProposedContent") + } + + allPubKeyBytes = append(allPubKeyBytes, publicKey) + } + + sigToKeyMap, err := verifyEnvelope(allPubKeyBytes, env) + if err != nil { + return err + } + + // we need to ensure we canonicalize the ordering of signatures + sortedSigs := make([]string, 0, len(sigToKeyMap)) + for sig := range sigToKeyMap { + sortedSigs = append(sortedSigs, sig) + } + sort.Strings(sortedSigs) + + for i, sig := range sortedSigs { + key := sigToKeyMap[sig] + canonicalizedKey, err := key.CanonicalValue() + if err != nil { + return err + } + b64CanonicalizedKey := strfmt.Base64(canonicalizedKey) + + dsseObj.Signatures = append(dsseObj.Signatures, &models.DSSEV001SchemaSignaturesItems0{ + Signature: &sortedSigs[i], + Verifier: &b64CanonicalizedKey, + }) + } + + decodedPayload, err := env.DecodeB64Payload() + if err != nil { + // this shouldn't happen because failure would have occurred in the verifyEnvelope call above + return err + } + + payloadHash := sha256.Sum256(decodedPayload) + dsseObj.PayloadHash = &models.DSSEV001SchemaPayloadHash{ + Algorithm: conv.Pointer(models.DSSEV001SchemaPayloadHashAlgorithmSha256), + Value: conv.Pointer(hex.EncodeToString(payloadHash[:])), + } + + envelopeHash := sha256.Sum256([]byte(*dsseObj.ProposedContent.Envelope)) + dsseObj.EnvelopeHash = &models.DSSEV001SchemaEnvelopeHash{ + Algorithm: conv.Pointer(models.DSSEV001SchemaEnvelopeHashAlgorithmSha256), + Value: conv.Pointer(hex.EncodeToString(envelopeHash[:])), + } + + // we've gotten through all processing without error, so now update the object we're unmarshalling into + v.DSSEObj = *dsseObj + v.env = env + + return nil +} + +// Canonicalize returns a JSON representation of the entry to be persisted into the log. This +// will be further canonicalized by the JSON Canonicalization Scheme (JCS) before being written. +// +// This function should not use v.DSSEObj.ProposedContent fields as they are client-provided and +// should not be trusted; the other fields at the top level are only set server-side.
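+//
+// Sketch of the canonical shape (illustrative, not verbatim output): the
+// server-computed signatures (sorted by signature value), envelopeHash, and
+// payloadHash are wrapped in a models.DSSE with its apiVersion set; the JSON
+// output is then JCS-canonicalized by the caller before being written.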
+func (v *V001Entry) Canonicalize(_ context.Context) ([]byte, error) { + canonicalEntry := models.DSSEV001Schema{ + Signatures: v.DSSEObj.Signatures, + EnvelopeHash: v.DSSEObj.EnvelopeHash, + PayloadHash: v.DSSEObj.PayloadHash, + ProposedContent: nil, // this is explicitly done as we don't want to canonicalize the envelope + } + + for _, s := range canonicalEntry.Signatures { + if s.Signature == nil { + return nil, errors.New("canonical entry missing required signature") + } + } + + sort.Slice(canonicalEntry.Signatures, func(i, j int) bool { + return *canonicalEntry.Signatures[i].Signature < *canonicalEntry.Signatures[j].Signature + }) + + itObj := models.DSSE{} + itObj.APIVersion = conv.Pointer(APIVERSION) + itObj.Spec = &canonicalEntry + + return json.Marshal(&itObj) +} + +// AttestationKey and AttestationKeyValue are not implemented so the envelopes will not be persisted in Rekor + +func (v V001Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) { + returnVal := models.DSSE{} + re := V001Entry{ + DSSEObj: models.DSSEV001Schema{ + ProposedContent: &models.DSSEV001SchemaProposedContent{}, + }, + } + var err error + artifactBytes := props.ArtifactBytes + if artifactBytes == nil { + if props.ArtifactPath == nil { + return nil, errors.New("path to artifact file must be specified") + } + if props.ArtifactPath.IsAbs() { + return nil, errors.New("dsse envelopes cannot be fetched over HTTP(S)") + } + artifactBytes, err = os.ReadFile(filepath.Clean(props.ArtifactPath.Path)) + if err != nil { + return nil, err + } + } + + env := &dsse.Envelope{} + if err := json.Unmarshal(artifactBytes, env); err != nil { + return nil, fmt.Errorf("payload must be a valid DSSE envelope: %w", err) + } + + allPubKeyBytes := make([][]byte, 0) + if len(props.PublicKeyBytes) > 0 { + allPubKeyBytes = append(allPubKeyBytes, props.PublicKeyBytes...) + } + + if len(props.PublicKeyPaths) > 0 { + for _, path := range props.PublicKeyPaths { + if path.IsAbs() { + return nil, errors.New("dsse public keys cannot be fetched over HTTP(S)") + } + + publicKeyBytes, err := os.ReadFile(filepath.Clean(path.Path)) + if err != nil { + return nil, fmt.Errorf("error reading public key file: %w", err) + } + + allPubKeyBytes = append(allPubKeyBytes, publicKeyBytes) + } + } + + keysBySig, err := verifyEnvelope(allPubKeyBytes, env) + if err != nil { + return nil, err + } + for _, key := range keysBySig { + canonicalKey, err := key.CanonicalValue() + if err != nil { + return nil, err + } + re.DSSEObj.ProposedContent.Verifiers = append(re.DSSEObj.ProposedContent.Verifiers, strfmt.Base64(canonicalKey)) + } + re.DSSEObj.ProposedContent.Envelope = conv.Pointer(string(artifactBytes)) + + returnVal.Spec = re.DSSEObj + returnVal.APIVersion = conv.Pointer(re.APIVersion()) + + return &returnVal, nil +} + +// verifyEnvelope takes in an array of possible key bytes and attempts to parse them as x509 public keys. +// it then uses these to verify the envelope and makes sure that every signature on the envelope is verified. +// it returns a map of verifiers indexed by the signature the verifier corresponds to. 
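+//
+// Usage sketch: keysBySig, err := verifyEnvelope(pubKeys, env); on success,
+// every signature in env.Signatures has a matching entry in keysBySig,
+// otherwise an error is returned.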
+func verifyEnvelope(allPubKeyBytes [][]byte, env *dsse.Envelope) (map[string]*x509.PublicKey, error) { + // map each signature on the envelope to the key that verified it, so we can get back to the key bytes for each signature + verifierBySig := make(map[string]*x509.PublicKey) + allSigs := make(map[string]struct{}) + for _, sig := range env.Signatures { + allSigs[sig.Sig] = struct{}{} + } + + for _, pubKeyBytes := range allPubKeyBytes { + if len(allSigs) == 0 { + break // if all signatures have been verified, do not attempt any more keys + } + key, err := x509.NewPublicKey(bytes.NewReader(pubKeyBytes)) + if err != nil { + return nil, fmt.Errorf("could not parse public key as x509: %w", err) + } + + vfr, err := signature.LoadVerifier(key.CryptoPubKey(), crypto.SHA256) + if err != nil { + return nil, fmt.Errorf("could not load verifier: %w", err) + } + + dsseVfr, err := dsse.NewEnvelopeVerifier(&sigdsse.VerifierAdapter{SignatureVerifier: vfr}) + if err != nil { + return nil, fmt.Errorf("could not use public key as a dsse verifier: %w", err) + } + + accepted, err := dsseVfr.Verify(context.Background(), env) + if err != nil { + return nil, fmt.Errorf("could not verify envelope: %w", err) + } + + for _, accept := range accepted { + delete(allSigs, accept.Sig.Sig) + verifierBySig[accept.Sig.Sig] = key + } + } + + if len(allSigs) > 0 { + return nil, errors.New("every signature must have a key that verifies it") + } + + return verifierBySig, nil +} + +func (v V001Entry) Verifiers() ([]pkitypes.PublicKey, error) { + if len(v.DSSEObj.Signatures) == 0 { + return nil, errors.New("dsse v0.0.1 entry not initialized") + } + + var keys []pkitypes.PublicKey + for _, s := range v.DSSEObj.Signatures { + key, err := x509.NewPublicKey(bytes.NewReader(*s.Verifier)) + if err != nil { + return nil, err + } + keys = append(keys, key) + } + return keys, nil +} + +func (v V001Entry) ArtifactHash() (string, error) { + if v.DSSEObj.PayloadHash == nil || v.DSSEObj.PayloadHash.Algorithm == nil || v.DSSEObj.PayloadHash.Value == nil { + return "", errors.New("dsse v0.0.1 entry not initialized") + } + return strings.ToLower(fmt.Sprintf("%s:%s", *v.DSSEObj.PayloadHash.Algorithm, *v.DSSEObj.PayloadHash.Value)), nil +} + +func (v V001Entry) Insertable() (bool, error) { + if v.DSSEObj.ProposedContent == nil { + return false, errors.New("missing proposed content") + } + if v.DSSEObj.ProposedContent.Envelope == nil || len(*v.DSSEObj.ProposedContent.Envelope) == 0 { + return false, errors.New("missing proposed DSSE envelope") + } + if len(v.DSSEObj.ProposedContent.Verifiers) == 0 { + return false, errors.New("missing proposed verifiers") + } + + return true, nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/entries.go b/vendor/github.com/sigstore/rekor/pkg/types/entries.go new file mode 100644 index 00000000000..06a8525bd9a --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/entries.go @@ -0,0 +1,176 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package types + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "net/url" + "reflect" + + "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" + "github.com/go-openapi/strfmt" + "github.com/go-viper/mapstructure/v2" + "github.com/sigstore/rekor/pkg/generated/models" + pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes" +) + +// EntryImpl specifies the behavior of a versioned type +type EntryImpl interface { + APIVersion() string // the supported versions for this implementation + IndexKeys() ([]string, error) // the keys that should be added to the external index for this entry + Canonicalize(ctx context.Context) ([]byte, error) // marshal the canonical entry to be put into the tlog + Unmarshal(e models.ProposedEntry) error // unmarshal the abstract entry into the specific struct for this versioned type + CreateFromArtifactProperties(context.Context, ArtifactProperties) (models.ProposedEntry, error) + Verifiers() ([]pkitypes.PublicKey, error) // list of keys or certificates that can verify an entry's signature + ArtifactHash() (string, error) // hex-encoded artifact hash prefixed with hash name, e.g. sha256:abcdef + Insertable() (bool, error) // denotes whether the entry that was unmarshalled has the writeOnly fields required to validate and insert into the log +} + +// EntryWithAttestationImpl specifies the behavior of a versioned type that also stores attestations +type EntryWithAttestationImpl interface { + EntryImpl + AttestationKey() string // returns the key used to look up the attestation from storage (should be sha256:digest) + AttestationKeyValue() (string, []byte) // returns the key to be used when storing the attestation as well as the attestation itself +} + +// ProposedEntryIterator is an iterator over a list of proposed entries +type ProposedEntryIterator interface { + models.ProposedEntry + HasNext() bool + Get() models.ProposedEntry + GetNext() models.ProposedEntry +} + +// EntryFactory describes a factory function that can generate structs for a specific versioned type +type EntryFactory func() EntryImpl + +func NewProposedEntry(ctx context.Context, kind, version string, props ArtifactProperties) (models.ProposedEntry, error) { + if tf, found := TypeMap.Load(kind); found { + t := tf.(func() TypeImpl)() + if t == nil { + return nil, fmt.Errorf("error generating object for kind '%v'", kind) + } + return t.CreateProposedEntry(ctx, version, props) + } + return nil, fmt.Errorf("could not create entry for kind '%v'", kind) +} + +// CreateVersionedEntry returns the specific instance for the type and version specified in the doc +// This method should be used on the insertion flow, which validates that the specific version proposed +// is permitted to be entered into the log. 
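+//
+// Caller-side sketch (for illustration):
+//
+//	ei, err := types.CreateVersionedEntry(pe) // rejects unknown kinds, unsupported
+//	                                          // versions, and non-insertable entries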
+func CreateVersionedEntry(pe models.ProposedEntry) (EntryImpl, error) { + ei, err := UnmarshalEntry(pe) + if err != nil { + return nil, err + } + kind := pe.Kind() + if tf, found := TypeMap.Load(kind); found { + if !tf.(func() TypeImpl)().IsSupportedVersion(ei.APIVersion()) { + return nil, fmt.Errorf("entry kind '%v' does not support inserting entries of version '%v'", kind, ei.APIVersion()) + } + } else { + return nil, fmt.Errorf("unknown kind '%v' specified", kind) + } + + if ok, err := ei.Insertable(); !ok { + return nil, fmt.Errorf("entry not insertable into log: %w", err) + } + + return ei, nil +} + +// UnmarshalEntry returns the specific instance for the type and version specified in the doc +// This method does not check for whether the version of the entry could be currently inserted into the log, +// and is useful when dealing with entries that have been persisted to the log. +func UnmarshalEntry(pe models.ProposedEntry) (EntryImpl, error) { + if pe == nil { + return nil, errors.New("proposed entry cannot be nil") + } + + kind := pe.Kind() + if tf, found := TypeMap.Load(kind); found { + t := tf.(func() TypeImpl)() + if t == nil { + return nil, fmt.Errorf("error generating object for kind '%v'", kind) + } + return t.UnmarshalEntry(pe) + } + return nil, fmt.Errorf("could not unmarshal entry for kind '%v'", kind) +} + +// DecodeEntry maps the (abstract) input structure into the specific entry implementation class; +// while doing so, it detects the case where we need to convert from string to []byte and does +// the base64 decoding required to make that happen. +// This also detects converting from string to strfmt.DateTime +func DecodeEntry(input, output interface{}) error { + cfg := mapstructure.DecoderConfig{ + DecodeHook: func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Slice && t != reflect.TypeOf(strfmt.DateTime{}) { + return data, nil + } + + if data == nil { + return nil, errors.New("attempted to decode nil data") + } + + if t == reflect.TypeOf(strfmt.DateTime{}) { + return strfmt.ParseDateTime(data.(string)) + } + + bytes, err := base64.StdEncoding.DecodeString(data.(string)) + if err != nil { + return []byte{}, fmt.Errorf("failed parsing base64 data: %w", err) + } + return bytes, nil + }, + Result: output, + } + + dec, err := mapstructure.NewDecoder(&cfg) + if err != nil { + return fmt.Errorf("error initializing decoder: %w", err) + } + + return dec.Decode(input) +} + +// CanonicalizeEntry returns the entry marshalled in JSON according to the +// canonicalization rules of RFC8785 to protect against any changes in golang's JSON +// marshalling logic that may reorder elements +func CanonicalizeEntry(ctx context.Context, entry EntryImpl) ([]byte, error) { + canonicalEntry, err := entry.Canonicalize(ctx) + if err != nil { + return nil, err + } + return jsoncanonicalizer.Transform(canonicalEntry) +} + +// ArtifactProperties provide a consistent struct for passing values from +// CLI flags to the type+version specific CreateProposeEntry() methods +type ArtifactProperties struct { + AdditionalAuthenticatedData []byte + ArtifactPath *url.URL + ArtifactHash string + ArtifactBytes []byte + SignaturePath *url.URL + SignatureBytes []byte + PublicKeyPaths []*url.URL + PublicKeyBytes [][]byte + PKIFormat string +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/error.go b/vendor/github.com/sigstore/rekor/pkg/types/error.go new file mode 100644 index 00000000000..57f0c77518b --- /dev/null +++ 
b/vendor/github.com/sigstore/rekor/pkg/types/error.go @@ -0,0 +1,31 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +// ValidationError indicates that there is an issue with the content in the HTTP Request that +// should result in an HTTP 400 Bad Request error being returned to the client +// +// Deprecated: use InputValidationError instead to take advantage of Go's error wrapping +type ValidationError error + +// InputValidationError indicates that there is an issue with the content in the HTTP Request that +// should result in an HTTP 400 Bad Request error being returned to the client +type InputValidationError struct { + Err error +} + +func (v *InputValidationError) Error() string { return v.Err.Error() } +func (v *InputValidationError) Unwrap() error { return v.Err } diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord.go b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord.go new file mode 100644 index 00000000000..66395c7a05a --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord.go @@ -0,0 +1,75 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package hashedrekord + +import ( + "context" + "errors" + "fmt" + + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/types" +) + +const ( + KIND = "hashedrekord" +) + +type BaseRekordType struct { + types.RekorType +} + +func init() { + types.TypeMap.Store(KIND, New) +} + +func New() types.TypeImpl { + brt := BaseRekordType{} + brt.Kind = KIND + brt.VersionMap = VersionMap + return &brt +} + +var VersionMap = types.NewSemVerEntryFactoryMap() + +func (rt BaseRekordType) UnmarshalEntry(pe models.ProposedEntry) (types.EntryImpl, error) { + if pe == nil { + return nil, errors.New("proposed entry cannot be nil") + } + + rekord, ok := pe.(*models.Hashedrekord) + if !ok { + return nil, fmt.Errorf("cannot unmarshal non-hashed Rekord types: %s", pe.Kind()) + } + + return rt.VersionedUnmarshal(rekord, *rekord.APIVersion) +} + +func (rt *BaseRekordType) CreateProposedEntry(ctx context.Context, version string, props types.ArtifactProperties) (models.ProposedEntry, error) { + if version == "" { + version = rt.DefaultVersion() + } + ei, err := rt.VersionedUnmarshal(nil, version) + if err != nil { + return nil, fmt.Errorf("fetching hashed Rekord version implementation: %w", err) + } + + return ei.CreateFromArtifactProperties(ctx, props) +} + +func (rt BaseRekordType) DefaultVersion() string { + return "0.0.1" +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord_schema.json new file mode 100644 index 00000000000..be9beaeb6a2 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord_schema.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://rekor.sigstore.dev/types/hashedrekord/hashedrekord_schema.json", + "title": "Hashedrekord Schema", + "description": "Schema for Hashedrekord objects", + "type": "object", + "oneOf": [ + { + "$ref": "v0.0.1/hashedrekord_v0_0_1_schema.json" + } + ] +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go new file mode 100644 index 00000000000..7d0ef04e61f --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go @@ -0,0 +1,377 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package hashedrekord + +import ( + "bytes" + "context" + "crypto" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/conv" + + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/internal/log" + pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes" + "github.com/sigstore/rekor/pkg/pki/x509" + "github.com/sigstore/rekor/pkg/types" + hashedrekord "github.com/sigstore/rekor/pkg/types/hashedrekord" + "github.com/sigstore/rekor/pkg/util" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +const ( + APIVERSION = "0.0.1" +) + +func init() { + if err := hashedrekord.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil { + log.Logger.Panic(err) + } +} + +type V001Entry struct { + HashedRekordObj models.HashedrekordV001Schema +} + +func (v V001Entry) APIVersion() string { + return APIVERSION +} + +func NewEntry() types.EntryImpl { + return &V001Entry{} +} + +func (v V001Entry) IndexKeys() ([]string, error) { + var result []string + + key := v.HashedRekordObj.Signature.PublicKey.Content + keyHash := sha256.Sum256(key) + result = append(result, strings.ToLower(hex.EncodeToString(keyHash[:]))) + + pub, err := x509.NewPublicKey(bytes.NewReader(key)) + if err != nil { + return nil, err + } + result = append(result, pub.Subjects()...) + + if v.HashedRekordObj.Data.Hash != nil { + hashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.HashedRekordObj.Data.Hash.Algorithm, *v.HashedRekordObj.Data.Hash.Value)) + result = append(result, hashKey) + } + + return result, nil +} + +// DecodeEntry performs direct decode into the provided output pointer +// without mutating the receiver on error. 
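+//
+// Illustrative generic-JSON input (the shape follows the v0.0.1 schema;
+// shown here only as an example):
+//
+//	map[string]any{
+//		"signature": map[string]any{
+//			"content":   "<base64-encoded signature>",
+//			"publicKey": map[string]any{"content": "<base64-encoded key or cert>"},
+//		},
+//		"data": map[string]any{
+//			"hash": map[string]any{"algorithm": "sha256", "value": "<hex digest>"},
+//		},
+//	}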
+func DecodeEntry(input any, output *models.HashedrekordV001Schema) error { + if output == nil { + return fmt.Errorf("nil output *models.HashedrekordV001Schema") + } + var m models.HashedrekordV001Schema + // Single type-switch with map fast path + switch data := input.(type) { + case map[string]any: + mm := data + if sigRaw, ok := mm["signature"].(map[string]any); ok { + m.Signature = &models.HashedrekordV001SchemaSignature{} + if c, ok := sigRaw["content"].(string); ok && c != "" { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(c))) + n, err := base64.StdEncoding.Decode(outb, []byte(c)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for signature content: %w", err) + } + m.Signature.Content = outb[:n] + } + if pkRaw, ok := sigRaw["publicKey"].(map[string]any); ok { + m.Signature.PublicKey = &models.HashedrekordV001SchemaSignaturePublicKey{} + if c, ok := pkRaw["content"].(string); ok && c != "" { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(c))) + n, err := base64.StdEncoding.Decode(outb, []byte(c)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for public key content: %w", err) + } + m.Signature.PublicKey.Content = outb[:n] + } + } + } + if dataRaw, ok := mm["data"].(map[string]any); ok { + if hRaw, ok := dataRaw["hash"].(map[string]any); ok { + m.Data = &models.HashedrekordV001SchemaData{Hash: &models.HashedrekordV001SchemaDataHash{}} + if alg, ok := hRaw["algorithm"].(string); ok { + m.Data.Hash.Algorithm = &alg + } + if val, ok := hRaw["value"].(string); ok { + m.Data.Hash.Value = &val + } + } + } + *output = m + return nil + case *models.HashedrekordV001Schema: + if data == nil { + return fmt.Errorf("nil *models.HashedrekordV001Schema") + } + *output = *data + return nil + case models.HashedrekordV001Schema: + *output = data + return nil + default: + return fmt.Errorf("unsupported input type %T for DecodeEntry", input) + } +} + +func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error { + rekord, ok := pe.(*models.Hashedrekord) + if !ok { + return errors.New("cannot unmarshal non Rekord v0.0.1 type") + } + + if err := DecodeEntry(rekord.Spec, &v.HashedRekordObj); err != nil { + return err + } + + // field validation + if err := v.HashedRekordObj.Validate(strfmt.Default); err != nil { + return err + } + + // cross field validation + _, _, err := v.validate() + return err +} + +func (v *V001Entry) Canonicalize(_ context.Context) ([]byte, error) { + sigObj, keyObj, err := v.validate() + if err != nil { + return nil, &types.InputValidationError{Err: err} + } + + canonicalEntry := models.HashedrekordV001Schema{} + + // need to canonicalize signature & key content + canonicalEntry.Signature = &models.HashedrekordV001SchemaSignature{} + canonicalEntry.Signature.Content, err = sigObj.CanonicalValue() + if err != nil { + return nil, err + } + + // key URL (if known) is not set deliberately + canonicalEntry.Signature.PublicKey = &models.HashedrekordV001SchemaSignaturePublicKey{} + canonicalEntry.Signature.PublicKey.Content, err = keyObj.CanonicalValue() + if err != nil { + return nil, err + } + + canonicalEntry.Data = &models.HashedrekordV001SchemaData{} + canonicalEntry.Data.Hash = v.HashedRekordObj.Data.Hash + // data content is not set deliberately + + v.HashedRekordObj = canonicalEntry + // wrap in valid object with kind and apiVersion set + rekordObj := models.Hashedrekord{} + rekordObj.APIVersion = conv.Pointer(APIVERSION) + rekordObj.Spec = &canonicalEntry + + return json.Marshal(&rekordObj) +} + +// validate performs 
cross-field validation for fields in object +func (v *V001Entry) validate() (pkitypes.Signature, pkitypes.PublicKey, error) { + sig := v.HashedRekordObj.Signature + if sig == nil { + return nil, nil, &types.InputValidationError{Err: errors.New("missing signature")} + } + // Hashed rekord type only works for x509 signature types + sigObj, err := x509.NewSignatureWithOpts(bytes.NewReader(sig.Content), options.WithED25519ph()) + if err != nil { + return nil, nil, &types.InputValidationError{Err: err} + } + + key := sig.PublicKey + if key == nil { + return nil, nil, &types.InputValidationError{Err: errors.New("missing public key")} + } + keyObj, err := x509.NewPublicKey(bytes.NewReader(key.Content)) + if err != nil { + return nil, nil, &types.InputValidationError{Err: err} + } + + data := v.HashedRekordObj.Data + if data == nil { + return nil, nil, &types.InputValidationError{Err: errors.New("missing data")} + } + + hash := data.Hash + if hash == nil { + return nil, nil, &types.InputValidationError{Err: errors.New("missing hash")} + } + + var alg crypto.Hash + switch conv.Value(hash.Algorithm) { + case models.HashedrekordV001SchemaDataHashAlgorithmSha384: + if len(*hash.Value) != crypto.SHA384.Size()*2 { + return nil, nil, &types.InputValidationError{Err: errors.New("invalid value for hash")} + } + alg = crypto.SHA384 + case models.HashedrekordV001SchemaDataHashAlgorithmSha512: + if len(*hash.Value) != crypto.SHA512.Size()*2 { + return nil, nil, &types.InputValidationError{Err: errors.New("invalid value for hash")} + } + alg = crypto.SHA512 + default: + if len(*hash.Value) != crypto.SHA256.Size()*2 { + return nil, nil, &types.InputValidationError{Err: errors.New("invalid value for hash")} + } + alg = crypto.SHA256 + } + + decoded, err := hex.DecodeString(*hash.Value) + if err != nil { + return nil, nil, err + } + if err := sigObj.Verify(nil, keyObj, options.WithDigest(decoded), options.WithCryptoSignerOpts(alg)); err != nil { + return nil, nil, &types.InputValidationError{Err: fmt.Errorf("verifying signature: %w", err)} + } + + return sigObj, keyObj, nil +} + +func getDataHashAlgorithm(hashAlgorithm crypto.Hash) string { + switch hashAlgorithm { + case crypto.SHA384: + return models.HashedrekordV001SchemaDataHashAlgorithmSha384 + case crypto.SHA512: + return models.HashedrekordV001SchemaDataHashAlgorithmSha512 + default: + return models.HashedrekordV001SchemaDataHashAlgorithmSha256 + } +} + +func (v V001Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) { + returnVal := models.Hashedrekord{} + re := V001Entry{} + + // we will need artifact, public-key, signature + re.HashedRekordObj.Data = &models.HashedrekordV001SchemaData{} + + var err error + + if props.PKIFormat != "x509" { + return nil, errors.New("hashedrekord entries can only be created for artifacts signed with x509-based PKI") + } + + re.HashedRekordObj.Signature = &models.HashedrekordV001SchemaSignature{} + sigBytes := props.SignatureBytes + if sigBytes == nil { + if props.SignaturePath == nil { + return nil, errors.New("a detached signature must be provided") + } + sigBytes, err = os.ReadFile(filepath.Clean(props.SignaturePath.Path)) + if err != nil { + return nil, fmt.Errorf("error reading signature file: %w", err) + } + } + re.HashedRekordObj.Signature.Content = strfmt.Base64(sigBytes) + + re.HashedRekordObj.Signature.PublicKey = &models.HashedrekordV001SchemaSignaturePublicKey{} + publicKeyBytes := props.PublicKeyBytes + if len(publicKeyBytes) == 0 { + if 
len(props.PublicKeyPaths) != 1 { + return nil, errors.New("only one public key must be provided to verify detached signature") + } + keyBytes, err := os.ReadFile(filepath.Clean(props.PublicKeyPaths[0].Path)) + if err != nil { + return nil, fmt.Errorf("error reading public key file: %w", err) + } + publicKeyBytes = append(publicKeyBytes, keyBytes) + } else if len(publicKeyBytes) != 1 { + return nil, errors.New("only one public key must be provided") + } + + hashAlgorithm, hashValue := util.UnprefixSHA(props.ArtifactHash) + re.HashedRekordObj.Signature.PublicKey.Content = strfmt.Base64(publicKeyBytes[0]) + re.HashedRekordObj.Data.Hash = &models.HashedrekordV001SchemaDataHash{ + Algorithm: conv.Pointer(getDataHashAlgorithm(hashAlgorithm)), + Value: conv.Pointer(hashValue), + } + + if _, _, err := re.validate(); err != nil { + return nil, err + } + + returnVal.APIVersion = conv.Pointer(re.APIVersion()) + returnVal.Spec = re.HashedRekordObj + + return &returnVal, nil +} + +func (v V001Entry) Verifiers() ([]pkitypes.PublicKey, error) { + if v.HashedRekordObj.Signature == nil || v.HashedRekordObj.Signature.PublicKey == nil || v.HashedRekordObj.Signature.PublicKey.Content == nil { + return nil, errors.New("hashedrekord v0.0.1 entry not initialized") + } + key, err := x509.NewPublicKey(bytes.NewReader(v.HashedRekordObj.Signature.PublicKey.Content)) + if err != nil { + return nil, err + } + return []pkitypes.PublicKey{key}, nil +} + +func (v V001Entry) ArtifactHash() (string, error) { + if v.HashedRekordObj.Data == nil || v.HashedRekordObj.Data.Hash == nil || v.HashedRekordObj.Data.Hash.Value == nil || v.HashedRekordObj.Data.Hash.Algorithm == nil { + return "", errors.New("hashedrekord v0.0.1 entry not initialized") + } + return strings.ToLower(fmt.Sprintf("%s:%s", *v.HashedRekordObj.Data.Hash.Algorithm, *v.HashedRekordObj.Data.Hash.Value)), nil +} + +func (v V001Entry) Insertable() (bool, error) { + if v.HashedRekordObj.Signature == nil { + return false, errors.New("missing signature property") + } + if len(v.HashedRekordObj.Signature.Content) == 0 { + return false, errors.New("missing signature content") + } + if v.HashedRekordObj.Signature.PublicKey == nil { + return false, errors.New("missing publicKey property") + } + if len(v.HashedRekordObj.Signature.PublicKey.Content) == 0 { + return false, errors.New("missing publicKey content") + } + if v.HashedRekordObj.Data == nil { + return false, errors.New("missing data property") + } + if v.HashedRekordObj.Data.Hash == nil { + return false, errors.New("missing hash property") + } + if v.HashedRekordObj.Data.Hash.Algorithm == nil { + return false, errors.New("missing hash algorithm") + } + if v.HashedRekordObj.Data.Hash.Value == nil { + return false, errors.New("missing hash value") + } + return true, nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json new file mode 100644 index 00000000000..3d536eb49e1 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json @@ -0,0 +1,54 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://rekor.sigstore.dev/types/rekord/hashedrekord_v0_0_1_schema.json", + "title": "Hashed Rekor v0.0.1 Schema", + "description": "Schema for Hashed Rekord object", + "type": "object", + "properties": { + "signature": { + "description": "Information about the detached signature associated with the 
entry", + "type": "object", + "properties": { + "content": { + "description": "Specifies the content of the signature inline within the document", + "type": "string", + "format": "byte" + }, + "publicKey" : { + "description": "The public key that can verify the signature; this can also be an X509 code signing certificate that contains the raw public key information", + "type": "object", + "properties": { + "content": { + "description": "Specifies the content of the public key or code signing certificate inline within the document", + "type": "string", + "format": "byte" + } + } + } + } + }, + "data": { + "description": "Information about the content associated with the entry", + "type": "object", + "properties": { + "hash": { + "description": "Specifies the hash algorithm and value for the content", + "type": "object", + "properties": { + "algorithm": { + "description": "The hashing function used to compute the hash value", + "type": "string", + "enum": [ "sha256", "sha384", "sha512" ] + }, + "value": { + "description": "The hash value for the content, as represented by a lower case hexadecimal string", + "type": "string" + } + }, + "required": [ "algorithm", "value" ] + } + } + } + }, + "required": [ "signature", "data" ] +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/README.md b/vendor/github.com/sigstore/rekor/pkg/types/intoto/README.md new file mode 100644 index 00000000000..0c4d1d73faf --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/README.md @@ -0,0 +1,13 @@ +**in-toto Type Data Documentation** + +This document provides a definition for each field that is not otherwise described in the [in-toto schema](https://github.com/sigstore/rekor/blob/main/pkg/types/intoto/v0.0.1/intoto_v0_0_1_schema.json). This document also notes any additional information about the values associated with each field such as the format in which the data is stored and any necessary transformations. + +**Attestation:** authenticated, machine-readable metadata about one or more software artifacts. [SLSA definition](https://github.com/slsa-framework/slsa/blob/main/controls/attestations.md) +- The Attestation value ought to be a Base64-encoded JSON object. +- The [in-toto Attestation specification](https://github.com/in-toto/attestation/blob/main/spec/README.md#statement) provides detailed guidance on understanding and parsing this JSON object. + +**AttestationType:** Identifies the type of attestation being made, such as a provenance attestation or a vulnerability scan attestation. AttestationType's value, even when prefixed with an http, is not necessarily a working URL. + +**How do you identify an object as an in-toto object?** + +The "Body" field will include an "IntotoObj" field. diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto.go b/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto.go new file mode 100644 index 00000000000..2bfba39468b --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto.go @@ -0,0 +1,135 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package intoto + +import ( + "context" + "errors" + "fmt" + "slices" + + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/internal/log" + "github.com/sigstore/rekor/pkg/types" +) + +const ( + KIND = "intoto" +) + +type BaseIntotoType struct { + types.RekorType +} + +func init() { + types.TypeMap.Store(KIND, New) +} + +func New() types.TypeImpl { + bit := BaseIntotoType{} + bit.Kind = KIND + bit.VersionMap = VersionMap + return &bit +} + +var VersionMap = types.NewSemVerEntryFactoryMap() + +func (it BaseIntotoType) UnmarshalEntry(pe models.ProposedEntry) (types.EntryImpl, error) { + if pe == nil { + return nil, errors.New("proposed entry cannot be nil") + } + + in, ok := pe.(*models.Intoto) + if !ok { + return nil, errors.New("cannot unmarshal non-Rekord types") + } + + return it.VersionedUnmarshal(in, *in.APIVersion) +} + +func (it *BaseIntotoType) CreateProposedEntry(ctx context.Context, version string, props types.ArtifactProperties) (models.ProposedEntry, error) { + var head ProposedIntotoEntryIterator + var next *ProposedIntotoEntryIterator + if version == "" { + // get default version as head of list + version = it.DefaultVersion() + ei, err := it.VersionedUnmarshal(nil, version) + if err != nil { + return nil, fmt.Errorf("fetching default Intoto version implementation: %w", err) + } + pe, err := ei.CreateFromArtifactProperties(ctx, props) + if err != nil { + return nil, fmt.Errorf("creating default Intoto entry: %w", err) + } + head.ProposedEntry = pe + next = &head + for _, v := range it.SupportedVersions() { + if v == it.DefaultVersion() { + continue + } + ei, err := it.VersionedUnmarshal(nil, v) + if err != nil { + log.Logger.Errorf("fetching Intoto version (%v) implementation: %w", v, err) + continue + } + versionedPE, err := ei.CreateFromArtifactProperties(ctx, props) + if err != nil { + log.Logger.Errorf("error creating Intoto entry of version (%v): %w", v, err) + continue + } + next.next = &ProposedIntotoEntryIterator{versionedPE, nil} + next = next.next.(*ProposedIntotoEntryIterator) + } + return head, nil + } + + ei, err := it.VersionedUnmarshal(nil, version) + if err != nil { + return nil, fmt.Errorf("fetching Intoto version implementation: %w", err) + } + return ei.CreateFromArtifactProperties(ctx, props) +} + +func (it BaseIntotoType) DefaultVersion() string { + return "0.0.2" +} + +// SupportedVersions returns the supported versions for this type in the order of preference +func (it BaseIntotoType) SupportedVersions() []string { + return []string{"0.0.2", "0.0.1"} +} + +// IsSupportedVersion returns true if the version can be inserted into the log, and false if not +func (it *BaseIntotoType) IsSupportedVersion(proposedVersion string) bool { + return slices.Contains(it.SupportedVersions(), proposedVersion) +} + +type ProposedIntotoEntryIterator struct { + models.ProposedEntry + next models.ProposedEntry +} + +func (p ProposedIntotoEntryIterator) HasNext() bool { + return p.next != nil +} + +func (p ProposedIntotoEntryIterator) GetNext() models.ProposedEntry { + return p.next +} + +func (p ProposedIntotoEntryIterator) Get() models.ProposedEntry { + return p.ProposedEntry +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto_schema.json new file mode 100644 index 00000000000..16f6172afa7 --- /dev/null +++ 
b/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto_schema.json @@ -0,0 +1,15 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://rekor.sigstore.dev/types/intoto/intoto_schema.json", + "title": "Intoto Schema", + "description": "Schema for Intoto objects", + "type": "object", + "oneOf": [ + { + "$ref": "v0.0.1/intoto_v0_0_1_schema.json" + }, + { + "$ref": "v0.0.2/intoto_v0_0_2_schema.json" + } + ] +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/entry.go b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/entry.go new file mode 100644 index 00000000000..ef895151c33 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/entry.go @@ -0,0 +1,648 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package intoto + +import ( + "bytes" + "context" + "crypto" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "slices" + "strings" + + "github.com/in-toto/in-toto-golang/in_toto" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/conv" + + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/internal/log" + pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes" + "github.com/sigstore/rekor/pkg/pki/x509" + "github.com/sigstore/rekor/pkg/types" + "github.com/sigstore/rekor/pkg/types/intoto" + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +const ( + APIVERSION = "0.0.2" +) + +func init() { + if err := intoto.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil { + log.Logger.Panic(err) + } +} + +var maxAttestationSize = 100 * 1024 + +func SetMaxAttestationSize(limit int) { + maxAttestationSize = limit +} + +type V002Entry struct { + IntotoObj models.IntotoV002Schema + env dsse.Envelope +} + +func (v V002Entry) APIVersion() string { + return APIVERSION +} + +func NewEntry() types.EntryImpl { + return &V002Entry{} +} + +func (v V002Entry) IndexKeys() ([]string, error) { + var result []string + + if v.IntotoObj.Content == nil || v.IntotoObj.Content.Envelope == nil { + log.Logger.Info("IntotoObj content or DSSE envelope is nil") + return result, nil + } + + for _, sig := range v.IntotoObj.Content.Envelope.Signatures { + if sig == nil || sig.PublicKey == nil { + return result, errors.New("malformed or missing signature") + } + keyObj, err := x509.NewPublicKey(bytes.NewReader(*sig.PublicKey)) + if err != nil { + return result, err + } + + canonKey, err := keyObj.CanonicalValue() + if err != nil { + return result, fmt.Errorf("could not canonicalize key: %w", err) + } + + keyHash := sha256.Sum256(canonKey) + result = append(result, "sha256:"+hex.EncodeToString(keyHash[:])) + + result = append(result, keyObj.Subjects()...)
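+		// As in the DSSE type, each public key contributes both a
+		// sha256:<digest-of-canonical-key> index key and any subject
+		// alternative names from a certificate.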
+ } + + payloadKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, *v.IntotoObj.Content.PayloadHash.Value)) + result = append(result, payloadKey) + + // since we can't deterministically calculate this server-side (due to public keys being added inline, and also canonicalization being potentially different), + // we'll just skip adding this index key + // hashkey := strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.Hash.Algorithm, *v.IntotoObj.Content.Hash.Value)) + // result = append(result, hashkey) + + switch *v.IntotoObj.Content.Envelope.PayloadType { + case in_toto.PayloadType: + + if v.IntotoObj.Content.Envelope.Payload == nil { + log.Logger.Info("IntotoObj DSSE payload is empty") + return result, nil + } + decodedPayload, err := base64.StdEncoding.DecodeString(string(v.IntotoObj.Content.Envelope.Payload)) + if err != nil { + return result, fmt.Errorf("could not decode envelope payload: %w", err) + } + statement, err := parseStatement(decodedPayload) + if err != nil { + return result, err + } + for _, s := range statement.Subject { + for alg, ds := range s.Digest { + result = append(result, alg+":"+ds) + } + } + // Not all in-toto statements will contain a SLSA provenance predicate. + // See https://github.com/in-toto/attestation/blob/main/spec/README.md#predicate + // for other predicates. + if predicate, err := parseSlsaPredicate(decodedPayload); err == nil { + if predicate.Predicate.Materials != nil { + for _, s := range predicate.Predicate.Materials { + for alg, ds := range s.Digest { + result = append(result, alg+":"+ds) + } + } + } + } + default: + log.Logger.Infof("Unknown in_toto DSSE envelope Type: %s", *v.IntotoObj.Content.Envelope.PayloadType) + } + return result, nil +} + +func parseStatement(p []byte) (*in_toto.Statement, error) { + ps := in_toto.Statement{} + if err := json.Unmarshal(p, &ps); err != nil { + return nil, err + } + return &ps, nil +} + +func parseSlsaPredicate(p []byte) (*in_toto.ProvenanceStatement, error) { + predicate := in_toto.ProvenanceStatement{} + if err := json.Unmarshal(p, &predicate); err != nil { + return nil, err + } + return &predicate, nil +} + +// DecodeEntry performs direct decode into the provided output pointer +// without mutating the receiver on error. 
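Editor's aside on the hand-rolled decoder defined next: DecodeEntry accepts the loosely-typed map[string]any shape that json.Unmarshal produces for a proposed entry's spec, base64-decoding the payload, sig, and publicKey fields along the way. A minimal sketch of driving it, with placeholder values rather than a real DSSE envelope:

```go
// Sketch only: payload, sig and publicKey below are placeholders, not a
// verifiable DSSE envelope.
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/sigstore/rekor/pkg/generated/models"
	intotov002 "github.com/sigstore/rekor/pkg/types/intoto/v0.0.2"
)

func main() {
	spec := map[string]any{
		"content": map[string]any{
			"envelope": map[string]any{
				"payloadType": "application/vnd.in-toto+json",
				"payload":     base64.StdEncoding.EncodeToString([]byte(`{"_type":"https://in-toto.io/Statement/v0.1"}`)),
				"signatures": []any{
					map[string]any{
						"keyid":     "",
						"sig":       base64.StdEncoding.EncodeToString([]byte("placeholder-sig")),
						"publicKey": base64.StdEncoding.EncodeToString([]byte("placeholder-key")),
					},
				},
			},
		},
	}
	var out models.IntotoV002Schema
	if err := intotov002.DecodeEntry(spec, &out); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("decoded payloadType:", *out.Content.Envelope.PayloadType)
}
```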
+func DecodeEntry(input any, output *models.IntotoV002Schema) error { + if output == nil { + return fmt.Errorf("nil output *models.IntotoV002Schema") + } + var m models.IntotoV002Schema + switch in := input.(type) { + case map[string]any: + mm := in + m.Content = &models.IntotoV002SchemaContent{Envelope: &models.IntotoV002SchemaContentEnvelope{}} + if c, ok := mm["content"].(map[string]any); ok { + if env, ok := c["envelope"].(map[string]any); ok { + if pt, ok := env["payloadType"].(string); ok { + m.Content.Envelope.PayloadType = &pt + } + if p, ok := env["payload"].(string); ok && p != "" { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(p))) + n, err := base64.StdEncoding.Decode(outb, []byte(p)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for payload: %w", err) + } + m.Content.Envelope.Payload = strfmt.Base64(outb[:n]) + } + if raw, ok := env["signatures"]; ok { + switch sigs := raw.(type) { + case []any: + m.Content.Envelope.Signatures = make([]*models.IntotoV002SchemaContentEnvelopeSignaturesItems0, 0, len(sigs)) + for _, s := range sigs { + if sm, ok := s.(map[string]any); ok { + item := &models.IntotoV002SchemaContentEnvelopeSignaturesItems0{} + if kid, ok := sm["keyid"].(string); ok { + item.Keyid = kid + } + if sig, ok := sm["sig"].(string); ok { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(sig))) + n, err := base64.StdEncoding.Decode(outb, []byte(sig)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for signature: %w", err) + } + b := strfmt.Base64(outb[:n]) + item.Sig = &b + } + if pk, ok := sm["publicKey"].(string); ok { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(pk))) + n, err := base64.StdEncoding.Decode(outb, []byte(pk)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for public key: %w", err) + } + b := strfmt.Base64(outb[:n]) + item.PublicKey = &b + } + m.Content.Envelope.Signatures = append(m.Content.Envelope.Signatures, item) + } + } + case []map[string]any: + m.Content.Envelope.Signatures = make([]*models.IntotoV002SchemaContentEnvelopeSignaturesItems0, 0, len(sigs)) + for _, sm := range sigs { + item := &models.IntotoV002SchemaContentEnvelopeSignaturesItems0{} + if kid, ok := sm["keyid"].(string); ok { + item.Keyid = kid + } + if sig, ok := sm["sig"].(string); ok { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(sig))) + n, err := base64.StdEncoding.Decode(outb, []byte(sig)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for signature: %w", err) + } + b := strfmt.Base64(outb[:n]) + item.Sig = &b + } + if pk, ok := sm["publicKey"].(string); ok { + outb := make([]byte, base64.StdEncoding.DecodedLen(len(pk))) + n, err := base64.StdEncoding.Decode(outb, []byte(pk)) + if err != nil { + return fmt.Errorf("failed parsing base64 data for public key: %w", err) + } + b := strfmt.Base64(outb[:n]) + item.PublicKey = &b + } + m.Content.Envelope.Signatures = append(m.Content.Envelope.Signatures, item) + } + } + } + } + if h, ok := c["hash"].(map[string]any); ok { + m.Content.Hash = &models.IntotoV002SchemaContentHash{} + if alg, ok := h["algorithm"].(string); ok { + m.Content.Hash.Algorithm = &alg + } + if val, ok := h["value"].(string); ok { + m.Content.Hash.Value = &val + } + } + if ph, ok := c["payloadHash"].(map[string]any); ok { + m.Content.PayloadHash = &models.IntotoV002SchemaContentPayloadHash{} + if alg, ok := ph["algorithm"].(string); ok { + m.Content.PayloadHash.Algorithm = &alg + } + if val, ok := ph["value"].(string); ok { + 
m.Content.PayloadHash.Value = &val + } + } + } + *output = m + return nil + case *models.IntotoV002Schema: + if in == nil { + return fmt.Errorf("nil *models.IntotoV002Schema") + } + *output = *in + return nil + case models.IntotoV002Schema: + *output = in + return nil + default: + return fmt.Errorf("unsupported input type %T for DecodeEntry", input) + } +} + +func (v *V002Entry) Unmarshal(pe models.ProposedEntry) error { + it, ok := pe.(*models.Intoto) + if !ok { + return errors.New("cannot unmarshal non Intoto v0.0.2 type") + } + + if err := DecodeEntry(it.Spec, &v.IntotoObj); err != nil { + return err + } + + // field validation + if err := v.IntotoObj.Validate(strfmt.Default); err != nil { + return err + } + + if string(v.IntotoObj.Content.Envelope.Payload) == "" { + return nil + } + + env := &dsse.Envelope{ + Payload: string(v.IntotoObj.Content.Envelope.Payload), + PayloadType: *v.IntotoObj.Content.Envelope.PayloadType, + } + + allPubKeyBytes := make([][]byte, 0) + for i, sig := range v.IntotoObj.Content.Envelope.Signatures { + if sig == nil { + v.IntotoObj.Content.Envelope.Signatures = slices.Delete(v.IntotoObj.Content.Envelope.Signatures, i, i+1) + continue + } + env.Signatures = append(env.Signatures, dsse.Signature{ + KeyID: sig.Keyid, + Sig: string(*sig.Sig), + }) + + allPubKeyBytes = append(allPubKeyBytes, *sig.PublicKey) + } + + if _, err := verifyEnvelope(allPubKeyBytes, env); err != nil { + return err + } + + v.env = *env + + decodedPayload, err := base64.StdEncoding.DecodeString(string(v.IntotoObj.Content.Envelope.Payload)) + if err != nil { + return fmt.Errorf("could not decode envelope payload: %w", err) + } + + h := sha256.Sum256(decodedPayload) + v.IntotoObj.Content.PayloadHash = &models.IntotoV002SchemaContentPayloadHash{ + Algorithm: conv.Pointer(models.IntotoV002SchemaContentPayloadHashAlgorithmSha256), + Value: conv.Pointer(hex.EncodeToString(h[:])), + } + + return nil +} + +func (v *V002Entry) Canonicalize(_ context.Context) ([]byte, error) { + if err := v.IntotoObj.Validate(strfmt.Default); err != nil { + return nil, err + } + + if v.IntotoObj.Content.Hash == nil { + return nil, errors.New("missing envelope digest") + } + + if err := v.IntotoObj.Content.Hash.Validate(strfmt.Default); err != nil { + return nil, fmt.Errorf("error validating envelope digest: %w", err) + } + + if v.IntotoObj.Content.PayloadHash == nil { + return nil, errors.New("missing payload digest") + } + + if err := v.IntotoObj.Content.PayloadHash.Validate(strfmt.Default); err != nil { + return nil, fmt.Errorf("error validating payload digest: %w", err) + } + + if len(v.IntotoObj.Content.Envelope.Signatures) == 0 { + return nil, errors.New("missing signatures") + } + + canonicalEntry := models.IntotoV002Schema{ + Content: &models.IntotoV002SchemaContent{ + Envelope: &models.IntotoV002SchemaContentEnvelope{ + PayloadType: v.IntotoObj.Content.Envelope.PayloadType, + Signatures: v.IntotoObj.Content.Envelope.Signatures, + }, + Hash: v.IntotoObj.Content.Hash, + PayloadHash: v.IntotoObj.Content.PayloadHash, + }, + } + itObj := models.Intoto{} + itObj.APIVersion = conv.Pointer(APIVERSION) + itObj.Spec = &canonicalEntry + + return json.Marshal(&itObj) +} + +// AttestationKey returns the digest of the attestation that was uploaded, to be used to lookup the attestation from storage +func (v *V002Entry) AttestationKey() string { + if v.IntotoObj.Content != nil && v.IntotoObj.Content.PayloadHash != nil { + return fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, 
*v.IntotoObj.Content.PayloadHash.Value) + } + return "" +} + +// AttestationKeyValue returns both the key and value to be persisted into attestation storage +func (v *V002Entry) AttestationKeyValue() (string, []byte) { + storageSize := base64.StdEncoding.DecodedLen(len(v.env.Payload)) + if storageSize > maxAttestationSize { + log.Logger.Infof("Skipping attestation storage, size %d is greater than max %d", storageSize, maxAttestationSize) + return "", nil + } + attBytes, err := base64.StdEncoding.DecodeString(v.env.Payload) + if err != nil { + log.Logger.Infof("could not decode envelope payload: %v", err) + return "", nil + } + return v.AttestationKey(), attBytes +} + +type verifier struct { + s signature.Signer + v signature.Verifier +} + +func (v *verifier) KeyID() (string, error) { + return "", nil +} + +func (v *verifier) Public() crypto.PublicKey { + // the dsse library uses this to generate a key ID if the KeyID function returns an empty string, + // as well as for the AcceptedKey return value. Unfortunately since key ids can be arbitrary, we don't + // know how to generate a matching id for the key id on the envelope's signature... + // dsse verify will skip verifiers whose key id doesn't match the signature's key id, unless it fails + // to generate one from the public key... so we trick it by returning nil ¯\_(ツ)_/¯ + return nil +} + +func (v *verifier) Sign(_ context.Context, data []byte) (sig []byte, err error) { + if v.s == nil { + return nil, errors.New("nil signer") + } + sig, err = v.s.SignMessage(bytes.NewReader(data), options.WithCryptoSignerOpts(crypto.SHA256)) + if err != nil { + return nil, err + } + return sig, nil +} + +func (v *verifier) Verify(_ context.Context, data, sig []byte) error { + if v.v == nil { + return errors.New("nil verifier") + } + return v.v.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data)) +} + +func (v V002Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) { + returnVal := models.Intoto{} + re := V002Entry{ + IntotoObj: models.IntotoV002Schema{ + Content: &models.IntotoV002SchemaContent{ + Envelope: &models.IntotoV002SchemaContentEnvelope{}, + }, + }} + var err error + artifactBytes := props.ArtifactBytes + if artifactBytes == nil { + if props.ArtifactPath == nil { + return nil, errors.New("path to artifact file must be specified") + } + if props.ArtifactPath.IsAbs() { + return nil, errors.New("intoto envelopes cannot be fetched over HTTP(S)") + } + artifactBytes, err = os.ReadFile(filepath.Clean(props.ArtifactPath.Path)) + if err != nil { + return nil, err + } + } + + env := dsse.Envelope{} + if err := json.Unmarshal(artifactBytes, &env); err != nil { + return nil, fmt.Errorf("payload must be a valid dsse envelope: %w", err) + } + + allPubKeyBytes := make([][]byte, 0) + if len(props.PublicKeyBytes) > 0 { + allPubKeyBytes = append(allPubKeyBytes, props.PublicKeyBytes...) 
+ } + + if len(props.PublicKeyPaths) > 0 { + for _, path := range props.PublicKeyPaths { + if path.IsAbs() { + return nil, errors.New("dsse public keys cannot be fetched over HTTP(S)") + } + + publicKeyBytes, err := os.ReadFile(filepath.Clean(path.Path)) + if err != nil { + return nil, fmt.Errorf("error reading public key file: %w", err) + } + + allPubKeyBytes = append(allPubKeyBytes, publicKeyBytes) + } + } + + keysBySig, err := verifyEnvelope(allPubKeyBytes, &env) + if err != nil { + return nil, err + } + + b64 := strfmt.Base64([]byte(env.Payload)) + re.IntotoObj.Content.Envelope.Payload = b64 + re.IntotoObj.Content.Envelope.PayloadType = &env.PayloadType + + for _, sig := range env.Signatures { + key, ok := keysBySig[sig.Sig] + if !ok { + return nil, errors.New("all signatures must have a key that verifies it") + } + + canonKey, err := key.CanonicalValue() + if err != nil { + return nil, fmt.Errorf("could not canonicize key: %w", err) + } + + keyBytes := strfmt.Base64(canonKey) + sigBytes := strfmt.Base64([]byte(sig.Sig)) + re.IntotoObj.Content.Envelope.Signatures = append(re.IntotoObj.Content.Envelope.Signatures, &models.IntotoV002SchemaContentEnvelopeSignaturesItems0{ + Keyid: sig.KeyID, + Sig: &sigBytes, + PublicKey: &keyBytes, + }) + } + + h := sha256.Sum256(artifactBytes) + re.IntotoObj.Content.Hash = &models.IntotoV002SchemaContentHash{ + Algorithm: conv.Pointer(models.IntotoV002SchemaContentHashAlgorithmSha256), + Value: conv.Pointer(hex.EncodeToString(h[:])), + } + + returnVal.Spec = re.IntotoObj + returnVal.APIVersion = conv.Pointer(re.APIVersion()) + + return &returnVal, nil +} + +// verifyEnvelope takes in an array of possible key bytes and attempts to parse them as x509 public keys. +// it then uses these to verify the envelope and makes sure that every signature on the envelope is verified. +// it returns a map of public keys indexed by the signature each key verified. 
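To make the key-to-signature pairing concrete, a hedged sketch of a caller inside this package (describeVerifiedKeys is a hypothetical helper, not part of the file; it uses only identifiers this file already imports):

```go
// Hypothetical same-package helper: report which key verified each signature.
func describeVerifiedKeys(pubPEM []byte, env *dsse.Envelope) error {
	keysBySig, err := verifyEnvelope([][]byte{pubPEM}, env)
	if err != nil {
		return err // some signature had no key that verified it
	}
	for _, sig := range env.Signatures {
		key := keysBySig[sig.Sig] // the x509 public key that verified this signature
		canonKey, err := key.CanonicalValue()
		if err != nil {
			return err
		}
		log.Logger.Infof("signature with keyid %q verified; canonical key is %d bytes", sig.KeyID, len(canonKey))
	}
	return nil
}
```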
+func verifyEnvelope(allPubKeyBytes [][]byte, env *dsse.Envelope) (map[string]*x509.PublicKey, error) { + // generate a fake id for these keys so we can get back to the key bytes and match them to their corresponding signature + verifierBySig := make(map[string]*x509.PublicKey) + allSigs := make(map[string]struct{}) + for _, sig := range env.Signatures { + allSigs[sig.Sig] = struct{}{} + } + + for _, pubKeyBytes := range allPubKeyBytes { + key, err := x509.NewPublicKey(bytes.NewReader(pubKeyBytes)) + if err != nil { + return nil, fmt.Errorf("could not parse public key as x509: %w", err) + } + + vfr, err := signature.LoadVerifier(key.CryptoPubKey(), crypto.SHA256) + if err != nil { + return nil, fmt.Errorf("could not load verifier: %w", err) + } + + dsseVfr, err := dsse.NewEnvelopeVerifier(&verifier{ + v: vfr, + }) + + if err != nil { + return nil, fmt.Errorf("could not use public key as a dsse verifier: %w", err) + } + + accepted, err := dsseVfr.Verify(context.Background(), env) + if err != nil { + return nil, fmt.Errorf("could not verify envelope: %w", err) + } + + for _, accept := range accepted { + delete(allSigs, accept.Sig.Sig) + verifierBySig[accept.Sig.Sig] = key + } + } + + if len(allSigs) > 0 { + return nil, errors.New("all signatures must have a key that verifies it") + } + + return verifierBySig, nil +} + +func (v V002Entry) Verifiers() ([]pkitypes.PublicKey, error) { + if v.IntotoObj.Content == nil || v.IntotoObj.Content.Envelope == nil { + return nil, errors.New("intoto v0.0.2 entry not initialized") + } + + sigs := v.IntotoObj.Content.Envelope.Signatures + if len(sigs) == 0 { + return nil, errors.New("no signatures found on intoto entry") + } + + var keys []pkitypes.PublicKey + for _, s := range v.IntotoObj.Content.Envelope.Signatures { + key, err := x509.NewPublicKey(bytes.NewReader(*s.PublicKey)) + if err != nil { + return nil, err + } + keys = append(keys, key) + } + return keys, nil +} + +func (v V002Entry) ArtifactHash() (string, error) { + if v.IntotoObj.Content == nil || v.IntotoObj.Content.PayloadHash == nil || v.IntotoObj.Content.PayloadHash.Algorithm == nil || v.IntotoObj.Content.PayloadHash.Value == nil { + return "", errors.New("intoto v0.0.2 entry not initialized") + } + return strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, *v.IntotoObj.Content.PayloadHash.Value)), nil +} + +func (v V002Entry) Insertable() (bool, error) { + if v.IntotoObj.Content == nil { + return false, errors.New("missing content property") + } + if v.IntotoObj.Content.Envelope == nil { + return false, errors.New("missing envelope property") + } + if len(v.IntotoObj.Content.Envelope.Payload) == 0 { + return false, errors.New("missing envelope content") + } + + if v.IntotoObj.Content.Envelope.PayloadType == nil || len(*v.IntotoObj.Content.Envelope.PayloadType) == 0 { + return false, errors.New("missing payloadType content") + } + + if len(v.IntotoObj.Content.Envelope.Signatures) == 0 { + return false, errors.New("missing signatures content") + } + for _, sig := range v.IntotoObj.Content.Envelope.Signatures { + if sig == nil { + return false, errors.New("missing signature entry") + } + if sig.Sig == nil || len(*sig.Sig) == 0 { + return false, errors.New("missing signature content") + } + if sig.PublicKey == nil || len(*sig.PublicKey) == 0 { + return false, errors.New("missing publicKey content") + } + } + + if v.env.Payload == "" || v.env.PayloadType == "" || len(v.env.Signatures) == 0 { + return false, errors.New("invalid DSSE envelope") + } + + return true, nil 
+} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/intoto_v0_0_2_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/intoto_v0_0_2_schema.json new file mode 100644 index 00000000000..3c404a30a02 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/intoto_v0_0_2_schema.json @@ -0,0 +1,105 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://rekor.sigstore.dev/types/intoto/intoto_v0_0_2_schema.json", + "title": "intoto v0.0.2 Schema", + "description": "Schema for intoto object", + "type": "object", + "properties": { + "content": { + "type": "object", + "properties": { + "envelope": { + "description": "dsse envelope", + "type": "object", + "properties": { + "payload": { + "description": "payload of the envelope", + "type": "string", + "format": "byte", + "writeOnly": true + }, + "payloadType": { + "description": "type describing the payload", + "type": "string" + }, + "signatures": { + "description": "collection of all signatures of the envelope's payload", + "type": "array", + "minItems": 1, + "items": { + "description": "a signature of the envelope's payload along with the public key for the signature", + "type": "object", + "properties": { + "keyid": { + "description": "optional id of the key used to create the signature", + "type": "string" + }, + "sig": { + "description": "signature of the payload", + "type": "string", + "format": "byte" + }, + "publicKey": { + "description": "public key that corresponds to this signature", + "type": "string", + "format": "byte" + } + }, + "required": ["sig", "publicKey"] + } + } + }, + "required": ["payloadType", "signatures"] + }, + "hash": { + "description": "Specifies the hash algorithm and value encompassing the entire signed envelope", + "type": "object", + "properties": { + "algorithm": { + "description": "The hashing function used to compute the hash value", + "type": "string", + "enum": [ + "sha256" + ] + }, + "value": { + "description": "The hash value for the archive", + "type": "string" + } + }, + "required": [ + "algorithm", + "value" + ], + "readOnly": true + }, + "payloadHash": { + "description": "Specifies the hash algorithm and value covering the payload within the DSSE envelope", + "type": "object", + "properties": { + "algorithm": { + "description": "The hashing function used to compute the hash value", + "type": "string", + "enum": [ "sha256" ] + }, + "value": { + "description": "The hash value of the payload", + "type": "string" + } + }, + "required": [ + "algorithm", + "value" + ], + "readOnly": true + } + }, + "required": [ + "envelope" + ] + } + }, + "required": [ + "content" + ] +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/test_util.go b/vendor/github.com/sigstore/rekor/pkg/types/test_util.go new file mode 100644 index 00000000000..b4daa3556e7 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/test_util.go @@ -0,0 +1,94 @@ +/* +Copyright © 2021 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package types + +import ( + "context" + + "github.com/go-openapi/strfmt" + + "github.com/sigstore/rekor/pkg/generated/models" + pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes" +) + +type BaseUnmarshalTester struct{} + +func (u BaseUnmarshalTester) NewEntry() EntryImpl { + return &BaseUnmarshalTester{} +} + +func (u BaseUnmarshalTester) ArtifactHash() (string, error) { + return "", nil +} + +func (u BaseUnmarshalTester) Verifiers() ([]pkitypes.PublicKey, error) { + return nil, nil +} + +func (u BaseUnmarshalTester) APIVersion() string { + return "2.0.1" +} + +func (u BaseUnmarshalTester) IndexKeys() ([]string, error) { + return []string{}, nil +} + +func (u BaseUnmarshalTester) Canonicalize(_ context.Context) ([]byte, error) { + return nil, nil +} + +func (u BaseUnmarshalTester) Unmarshal(_ models.ProposedEntry) error { + return nil +} + +func (u BaseUnmarshalTester) Validate() error { + return nil +} + +func (u BaseUnmarshalTester) AttestationKey() string { + return "" +} + +func (u BaseUnmarshalTester) AttestationKeyValue() (string, []byte) { + return "", nil +} + +func (u BaseUnmarshalTester) CreateFromArtifactProperties(_ context.Context, _ ArtifactProperties) (models.ProposedEntry, error) { + return nil, nil +} + +func (u BaseUnmarshalTester) Insertable() (bool, error) { + return false, nil +} + +type BaseProposedEntryTester struct{} + +func (b BaseProposedEntryTester) Kind() string { + return "nil" +} + +func (b BaseProposedEntryTester) SetKind(_ string) { + +} + +func (b BaseProposedEntryTester) Validate(_ strfmt.Registry) error { + return nil +} + +func (b BaseProposedEntryTester) ContextValidate(_ context.Context, _ strfmt.Registry) error { + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/types.go b/vendor/github.com/sigstore/rekor/pkg/types/types.go new file mode 100644 index 00000000000..72722321b18 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/types.go @@ -0,0 +1,88 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
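The types.go file that follows defines the TypeMap registry that the intoto init() earlier stores its constructor in. A sketch of how a caller might resolve a registered kind and unmarshal a proposed entry (unmarshalProposed is illustrative, assumed to live in package types; ListImplementedTypes below performs the same type assertion on the stored value):

```go
// Sketch, package types: resolve a registered kind and unmarshal an entry.
func unmarshalProposed(pe models.ProposedEntry) (EntryImpl, error) {
	v, ok := TypeMap.Load(pe.Kind())
	if !ok {
		return nil, fmt.Errorf("unknown entry kind %q", pe.Kind())
	}
	ti := v.(func() TypeImpl)() // constructors are stored as func() TypeImpl
	return ti.UnmarshalEntry(pe)
}
```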
+ +package types + +import ( + "context" + "errors" + "fmt" + "slices" + "sync" + + "github.com/sigstore/rekor/pkg/generated/models" +) + +// TypeMap stores mapping between type strings and entry constructors +// entries are written once at process initialization and read for each transaction, so we use +// sync.Map which is optimized for this case +var TypeMap sync.Map + +// RekorType is the base struct that is embedded in all type implementations +type RekorType struct { + Kind string // this is the unique string that identifies the type + VersionMap VersionEntryFactoryMap // this maps the supported versions to implementation +} + +// TypeImpl is implemented by all types to support the polymorphic conversion of abstract +// proposed entry to a working implementation for the versioned type requested, if supported +type TypeImpl interface { + CreateProposedEntry(context.Context, string, ArtifactProperties) (models.ProposedEntry, error) + DefaultVersion() string + SupportedVersions() []string + IsSupportedVersion(string) bool + UnmarshalEntry(pe models.ProposedEntry) (EntryImpl, error) +} + +// VersionedUnmarshal extracts the correct implementing factory function from the type's version map, +// creates an entry of that versioned type and then calls that versioned type's unmarshal method +func (rt *RekorType) VersionedUnmarshal(pe models.ProposedEntry, version string) (EntryImpl, error) { + ef, err := rt.VersionMap.GetEntryFactory(version) + if err != nil { + return nil, fmt.Errorf("%s implementation for version '%v' not found: %w", rt.Kind, version, err) + } + entry := ef() + if entry == nil { + return nil, errors.New("failure generating object") + } + if pe == nil { + return entry, nil + } + return entry, entry.Unmarshal(pe) +} + +// SupportedVersions returns a list of versions of this type that can be currently entered into the log +func (rt *RekorType) SupportedVersions() []string { + return rt.VersionMap.SupportedVersions() +} + +// IsSupportedVersion returns true if the version can be inserted into the log, and false if not +func (rt *RekorType) IsSupportedVersion(proposedVersion string) bool { + return slices.Contains(rt.SupportedVersions(), proposedVersion) +} + +// ListImplementedTypes returns a list of all type strings currently known to +// be implemented +func ListImplementedTypes() []string { + retVal := []string{} + TypeMap.Range(func(k interface{}, v interface{}) bool { + tf := v.(func() TypeImpl) + for _, verStr := range tf().SupportedVersions() { + retVal = append(retVal, fmt.Sprintf("%v:%v", k.(string), verStr)) + } + return true + }) + return retVal +} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/versionmap.go b/vendor/github.com/sigstore/rekor/pkg/types/versionmap.go new file mode 100644 index 00000000000..d2716370994 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/versionmap.go @@ -0,0 +1,97 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
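versionmap.go below keys entry factories by semver range rather than by exact version: SetEntryFactory validates its key with semver.ParseRange, while GetEntryFactory parses the lookup string as a concrete version and returns the first factory whose range matches. A hedged sketch (registerAndResolve and myFactory are illustrative; EntryFactory is the func() EntryImpl type declared elsewhere in this package):

```go
// Sketch: register a factory under a semver range, then resolve a concrete version.
func registerAndResolve(myFactory EntryFactory) (EntryImpl, error) {
	vm := NewSemVerEntryFactoryMap()
	if err := vm.SetEntryFactory(">=0.0.1 <0.1.0", myFactory); err != nil {
		return nil, err // the key must parse as a semver range
	}
	ef, err := vm.GetEntryFactory("0.0.2") // a concrete version inside the range
	if err != nil {
		return nil, err
	}
	return ef(), nil
}
```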
+ +package types + +import ( + "fmt" + "sync" + + "github.com/blang/semver" + "github.com/sigstore/rekor/pkg/internal/log" +) + +// VersionEntryFactoryMap defines a map-like interface to find the correct implementation for a version string +// This could be a simple map[string][EntryFactory], or something more elegant (e.g. semver) +type VersionEntryFactoryMap interface { + GetEntryFactory(string) (EntryFactory, error) // return the entry factory for the specified version + SetEntryFactory(string, EntryFactory) error // set the entry factory for the specified version + Count() int // return the count of entry factories currently in the map + SupportedVersions() []string // return a list of versions currently stored in the map +} + +// SemVerEntryFactoryMap implements a map that allows implementations to specify their supported versions using +// semver-compliant strings +type SemVerEntryFactoryMap struct { + factoryMap map[string]EntryFactory + + sync.RWMutex +} + +func NewSemVerEntryFactoryMap() VersionEntryFactoryMap { + s := SemVerEntryFactoryMap{} + s.factoryMap = make(map[string]EntryFactory) + return &s +} + +func (s *SemVerEntryFactoryMap) Count() int { + return len(s.factoryMap) +} + +func (s *SemVerEntryFactoryMap) GetEntryFactory(version string) (EntryFactory, error) { + s.RLock() + defer s.RUnlock() + + semverToMatch, err := semver.Parse(version) + if err != nil { + log.Logger.Error(err) + return nil, err + } + + // will return first function that matches + for k, v := range s.factoryMap { + semverRange, err := semver.ParseRange(k) + if err != nil { + log.Logger.Error(err) + return nil, err + } + + if semverRange(semverToMatch) { + return v, nil + } + } + return nil, fmt.Errorf("unable to locate entry for version %s", version) +} + +func (s *SemVerEntryFactoryMap) SetEntryFactory(constraint string, ef EntryFactory) error { + s.Lock() + defer s.Unlock() + + if _, err := semver.ParseRange(constraint); err != nil { + log.Logger.Error(err) + return err + } + + s.factoryMap[constraint] = ef + return nil +} + +func (s *SemVerEntryFactoryMap) SupportedVersions() []string { + var versions []string + for k := range s.factoryMap { + versions = append(versions, k) + } + return versions +} diff --git a/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go b/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go new file mode 100644 index 00000000000..27b246d7997 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go @@ -0,0 +1,165 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
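checkpoint.go below marshals and parses the transparency-dev checkpoint body that Rekor signs: an origin line, the decimal tree size, and the base64 root hash, optionally followed by extra content lines. For orientation, an illustrative body matching the layout parsed by UnmarshalCheckpoint (all values fabricated; the origin follows the "hostname - treeID" shape produced by CreateAndSignCheckpoint further down):

```go
// Illustrative only; origin, size, and hash values are fabricated.
const exampleCheckpointBody = "rekor.sigstore.dev - 2605736670972794746\n" + // origin
	"13892\n" + // tree size in decimal
	"dGhpcyBpcyBub3QgYSByZWFsIHJvb3QgaGFzaCEhIQ==\n" // base64 root hash
```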
+ +package util + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// heavily borrowed from https://github.com/transparency-dev/formats/blob/main/log/checkpoint.go + +type Checkpoint struct { + // Origin is the unique identifier/version string + Origin string + // Size is the number of entries in the log at this checkpoint. + Size uint64 + // Hash is the hash which commits to the contents of the entire log. + Hash []byte + // OtherContent is any additional data to be included in the signed payload; each element is assumed to be one line + OtherContent []string +} + +// String returns the String representation of the Checkpoint +func (c Checkpoint) String() string { + var b strings.Builder + fmt.Fprintf(&b, "%s\n%d\n%s\n", c.Origin, c.Size, base64.StdEncoding.EncodeToString(c.Hash)) + for _, line := range c.OtherContent { + fmt.Fprintf(&b, "%s\n", line) + } + return b.String() +} + +// MarshalCheckpoint returns the common format representation of this Checkpoint. +func (c Checkpoint) MarshalCheckpoint() ([]byte, error) { + return []byte(c.String()), nil +} + +// UnmarshalCheckpoint parses the common formatted checkpoint data and stores the result +// in the Checkpoint. +// +// The supplied data is expected to begin with the following 3 lines of text, +// each followed by a newline: +// <origin> +// <decimal representation of the log size> +// <base64 representation of the log hash> +// <optional non-empty line of other content>... +// <optional non-empty line of other content>... +// +// This will discard any content found after the checkpoint (including signatures) +func (c *Checkpoint) UnmarshalCheckpoint(data []byte) error { + l := bytes.Split(data, []byte("\n")) + if len(l) < 4 { + return errors.New("invalid checkpoint - too few newlines") + } + origin := string(l[0]) + if len(origin) == 0 { + return errors.New("invalid checkpoint - empty ecosystem") + } + size, err := strconv.ParseUint(string(l[1]), 10, 64) + if err != nil { + return fmt.Errorf("invalid checkpoint - size invalid: %w", err) + } + h, err := base64.StdEncoding.DecodeString(string(l[2])) + if err != nil { + return fmt.Errorf("invalid checkpoint - invalid hash: %w", err) + } + *c = Checkpoint{ + Origin: origin, + Size: size, + Hash: h, + } + if len(l) >= 3 { + for _, line := range l[3:] { + if len(line) == 0 { + break + } + c.OtherContent = append(c.OtherContent, string(line)) + } + } + return nil +} + +type SignedCheckpoint struct { + Checkpoint + SignedNote +} + +func CreateSignedCheckpoint(c Checkpoint) (*SignedCheckpoint, error) { + text, err := c.MarshalCheckpoint() + if err != nil { + return nil, err + } + return &SignedCheckpoint{ + Checkpoint: c, + SignedNote: SignedNote{Note: string(text)}, + }, nil +} + +func SignedCheckpointValidator(strToValidate string) bool { + s := SignedNote{} + if err := s.UnmarshalText([]byte(strToValidate)); err != nil { + return false + } + c := &Checkpoint{} + return c.UnmarshalCheckpoint([]byte(s.Note)) == nil +} + +func CheckpointValidator(strToValidate string) bool { + c := &Checkpoint{} + return c.UnmarshalCheckpoint([]byte(strToValidate)) == nil +} + +func (r *SignedCheckpoint) UnmarshalText(data []byte) error { + s := SignedNote{} + if err := s.UnmarshalText(data); err != nil { + return fmt.Errorf("unmarshalling signed note: %w", err) + } + c := Checkpoint{} + if err := c.UnmarshalCheckpoint([]byte(s.Note)); err != nil { + return fmt.Errorf("unmarshalling checkpoint: %w", err) + } + *r = SignedCheckpoint{Checkpoint: c, SignedNote: s} + return nil +} + +// CreateAndSignCheckpoint creates a 
signed checkpoint as a commitment to the current root hash +func CreateAndSignCheckpoint(ctx context.Context, hostname string, treeID int64, treeSize uint64, rootHash []byte, signer signature.Signer) ([]byte, error) { + sth, err := CreateSignedCheckpoint(Checkpoint{ + Origin: fmt.Sprintf("%s - %d", hostname, treeID), + Size: treeSize, + Hash: rootHash, + }) + if err != nil { + return nil, fmt.Errorf("error creating checkpoint: %w", err) + } + if _, err := sth.Sign(hostname, signer, options.WithContext(ctx)); err != nil { + return nil, fmt.Errorf("error signing checkpoint: %w", err) + } + scBytes, err := sth.MarshalText() + if err != nil { + return nil, fmt.Errorf("error marshalling checkpoint: %w", err) + } + return scBytes, nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/util/fetch.go b/vendor/github.com/sigstore/rekor/pkg/util/fetch.go new file mode 100644 index 00000000000..7f8e93fb046 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/util/fetch.go @@ -0,0 +1,49 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" +) + +// FileOrURLReadCloser Note: caller is responsible for closing ReadCloser returned from method! +func FileOrURLReadCloser(ctx context.Context, url string, content []byte) (io.ReadCloser, error) { + var dataReader io.ReadCloser + if url != "" { + //TODO: set timeout here, SSL settings? + client := &http.Client{} + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return nil, err + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode < 200 || resp.StatusCode > 299 { + return nil, fmt.Errorf("error received while fetching artifact '%v': %v", url, resp.Status) + } + + dataReader = resp.Body + } else { + dataReader = io.NopCloser(bytes.NewReader(content)) + } + return dataReader, nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/util/sha.go b/vendor/github.com/sigstore/rekor/pkg/util/sha.go new file mode 100644 index 00000000000..07b8fb1b53d --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/util/sha.go @@ -0,0 +1,79 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "crypto" + "fmt" + "strings" +) + +// PrefixSHA sets the prefix of a sha hash to match how it is stored based on the length. 
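A quick sketch of the length-based mapping the two helpers below implement (same package; the digests are synthetic repeats, not real hashes):

```go
// Sketch, package util: prefixing and unprefixing digests by length.
func exampleSHAPrefixing() {
	fmt.Println(PrefixSHA(strings.Repeat("ab", 32)))           // 64 hex chars -> "sha256:abab..."
	fmt.Println(PrefixSHA("sha1:" + strings.Repeat("ab", 20))) // already prefixed -> returned unchanged
	alg, digest := UnprefixSHA("sha512:" + strings.Repeat("ab", 64))
	fmt.Println(alg == crypto.SHA512, digest) // true, plus the bare 128-char hex digest
}
```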
+func PrefixSHA(sha string) string { + var prefix string + var components = strings.Split(sha, ":") + + if len(components) == 2 { + return sha + } + + switch len(sha) { + case 40: + prefix = "sha1:" + case 64: + prefix = "sha256:" + case 96: + prefix = "sha384:" + case 128: + prefix = "sha512:" + } + + return fmt.Sprintf("%v%v", prefix, sha) +} + +func UnprefixSHA(sha string) (crypto.Hash, string) { + components := strings.Split(sha, ":") + + if len(components) == 2 { + prefix := components[0] + sha = components[1] + + switch prefix { + case "sha1": + return crypto.SHA1, sha + case "sha256": + return crypto.SHA256, sha + case "sha384": + return crypto.SHA384, sha + case "sha512": + return crypto.SHA512, sha + default: + return crypto.Hash(0), "" + } + } + + switch len(sha) { + case 40: + return crypto.SHA1, sha + case 64: + return crypto.SHA256, sha + case 96: + return crypto.SHA384, sha + case 128: + return crypto.SHA512, sha + } + + return crypto.Hash(0), "" +} diff --git a/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go b/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go new file mode 100644 index 00000000000..4c9c8f8a70f --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go @@ -0,0 +1,211 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "bufio" + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "strings" + + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" + "golang.org/x/mod/sumdb/note" +) + +type SignedNote struct { + // Textual representation of a note to sign. 
+ Note string + // Signatures are one or more signature lines covering the payload + Signatures []note.Signature +} + +// Sign adds a signature to a SignedCheckpoint object +// The signature is added to the signature array as well as being directly returned to the caller +func (s *SignedNote) Sign(identity string, signer signature.Signer, opts signature.SignOption) (*note.Signature, error) { + sig, err := signer.SignMessage(bytes.NewReader([]byte(s.Note)), opts) + if err != nil { + return nil, fmt.Errorf("signing note: %w", err) + } + + pk, err := signer.PublicKey() + if err != nil { + return nil, fmt.Errorf("retrieving public key: %w", err) + } + pkHash, err := getPublicKeyHash(pk) + if err != nil { + return nil, err + } + + signature := note.Signature{ + Name: identity, + Hash: pkHash, + Base64: base64.StdEncoding.EncodeToString(sig), + } + + s.Signatures = append(s.Signatures, signature) + return &signature, nil +} + +// Verify checks that one of the signatures can be successfully verified using +// the supplied public key +func (s SignedNote) Verify(verifier signature.Verifier) bool { + if len(s.Signatures) == 0 { + return false + } + + msg := []byte(s.Note) + digest := sha256.Sum256(msg) + + pk, err := verifier.PublicKey() + if err != nil { + return false + } + verifierPkHash, err := getPublicKeyHash(pk) + if err != nil { + return false + } + + for _, s := range s.Signatures { + sigBytes, err := base64.StdEncoding.DecodeString(s.Base64) + if err != nil { + return false + } + + if s.Hash != verifierPkHash { + return false + } + + opts := []signature.VerifyOption{} + switch pk.(type) { + case *rsa.PublicKey, *ecdsa.PublicKey: + opts = append(opts, options.WithDigest(digest[:])) + case ed25519.PublicKey: + break + default: + return false + } + if err := verifier.VerifySignature(bytes.NewReader(sigBytes), bytes.NewReader(msg), opts...); err != nil { + return false + } + } + return true +} + +// MarshalText returns the common format representation of this SignedNote. +func (s SignedNote) MarshalText() ([]byte, error) { + return []byte(s.String()), nil +} + +// String returns the String representation of the SignedNote +func (s SignedNote) String() string { + var b strings.Builder + b.WriteString(s.Note) + b.WriteRune('\n') + for _, sig := range s.Signatures { + var hbuf [4]byte + binary.BigEndian.PutUint32(hbuf[:], sig.Hash) + sigBytes, _ := base64.StdEncoding.DecodeString(sig.Base64) + b64 := base64.StdEncoding.EncodeToString(append(hbuf[:], sigBytes...)) + fmt.Fprintf(&b, "%c %s %s\n", '\u2014', sig.Name, b64) + } + + return b.String() +} + +// UnmarshalText parses the common formatted signed note data and stores the result +// in the SignedNote. THIS DOES NOT VERIFY SIGNATURES INSIDE THE CONTENT! +// +// The supplied data is expected to contain a single Note, followed by a single +// line with no comment, followed by one or more lines with the following format: +// +// \u2014 name signature +// +// - name is the string associated with the signer +// - signature is a base64 encoded string; the first 4 bytes of the decoded value is a +// hint to the public key; it is a big-endian encoded uint32 representing the first +// 4 bytes of the SHA256 hash of the public key +func (s *SignedNote) UnmarshalText(data []byte) error { + sigSplit := []byte("\n\n") + // Must end with signature block preceded by blank line. 
+ split := bytes.LastIndex(data, sigSplit) + if split < 0 { + return errors.New("malformed note") + } + text, data := data[:split+1], data[split+2:] + if len(data) == 0 || data[len(data)-1] != '\n' { + return errors.New("malformed note") + } + + sn := SignedNote{ + Note: string(text), + } + + b := bufio.NewScanner(bytes.NewReader(data)) + for b.Scan() { + var name, signature string + if _, err := fmt.Fscanf(strings.NewReader(b.Text()), "\u2014 %s %s\n", &name, &signature); err != nil { + return fmt.Errorf("parsing signature: %w", err) + } + + sigBytes, err := base64.StdEncoding.DecodeString(signature) + if err != nil { + return fmt.Errorf("decoding signature: %w", err) + } + if len(sigBytes) < 5 { + return errors.New("signature is too small") + } + + sig := note.Signature{ + Name: name, + Hash: binary.BigEndian.Uint32(sigBytes[0:4]), + Base64: base64.StdEncoding.EncodeToString(sigBytes[4:]), + } + sn.Signatures = append(sn.Signatures, sig) + } + if len(sn.Signatures) == 0 { + return errors.New("no signatures found in input") + } + + // copy sn to s + *s = sn + return nil +} + +func SignedNoteValidator(strToValidate string) bool { + s := SignedNote{} + return s.UnmarshalText([]byte(strToValidate)) == nil +} + +func getPublicKeyHash(publicKey crypto.PublicKey) (uint32, error) { + pubKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey) + if err != nil { + return 0, fmt.Errorf("marshalling public key: %w", err) + } + pkSha := sha256.Sum256(pubKeyBytes) + hash := binary.BigEndian.Uint32(pkSha[:]) + return hash, nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/verify/verify.go b/vendor/github.com/sigstore/rekor/pkg/verify/verify.go new file mode 100644 index 00000000000..61846923b79 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/verify/verify.go @@ -0,0 +1,234 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + + "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" + "github.com/sigstore/rekor/pkg/generated/client" + "github.com/sigstore/rekor/pkg/generated/client/tlog" + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/util" + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" + "github.com/transparency-dev/merkle/proof" + "github.com/transparency-dev/merkle/rfc6962" +) + +// ProveConsistency verifies consistency between an initial, trusted STH +// and a second new STH. Callers MUST verify signature on the STHs. 
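A hedged sketch of a caller's flow for the function defined next (checkConsistency is illustrative, not part of the package; it assumes both checkpoints were fetched elsewhere and uses only identifiers this file already imports):

```go
// Sketch: verify both STH signatures before proving consistency between them.
func checkConsistency(ctx context.Context, rc *client.Rekor, verifier signature.Verifier,
	trusted, fresh *util.SignedCheckpoint, treeID string) error {
	if !trusted.Verify(verifier) || !fresh.Verify(verifier) {
		return errors.New("checkpoint signature verification failed")
	}
	// Consistency is only meaningful once both signatures check out.
	return ProveConsistency(ctx, rc, trusted, fresh, treeID)
}
```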
+func ProveConsistency(ctx context.Context, rClient *client.Rekor, + oldSTH *util.SignedCheckpoint, newSTH *util.SignedCheckpoint, treeID string) error { + oldTreeSize := int64(oldSTH.Size) // nolint: gosec + switch { + case oldTreeSize == 0: + return errors.New("consistency proofs cannot be computed starting from an empty log") + case oldTreeSize == int64(newSTH.Size): // nolint: gosec + if !bytes.Equal(oldSTH.Hash, newSTH.Hash) { + return errors.New("old root hash does not match STH hash") + } + case oldTreeSize < int64(newSTH.Size): // nolint: gosec + consistencyParams := tlog.NewGetLogProofParamsWithContext(ctx) + consistencyParams.FirstSize = &oldTreeSize // Root size at the old, or trusted state. + consistencyParams.LastSize = int64(newSTH.Size) // nolint: gosec // Root size at the new state to verify against. + consistencyParams.TreeID = &treeID + consistencyProof, err := rClient.Tlog.GetLogProof(consistencyParams) + if err != nil { + return err + } + var hashes [][]byte + for _, h := range consistencyProof.Payload.Hashes { + b, err := hex.DecodeString(h) + if err != nil { + return errors.New("error decoding consistency proof hashes") + } + hashes = append(hashes, b) + } + if err := proof.VerifyConsistency(rfc6962.DefaultHasher, + oldSTH.Size, newSTH.Size, hashes, oldSTH.Hash, newSTH.Hash); err != nil { + return err + } + case oldTreeSize > int64(newSTH.Size): // nolint: gosec + return errors.New("trusted tree size is larger than the new tree size; consistency cannot be proven") + } + return nil +} + +// VerifyCurrentCheckpoint verifies the provided checkpoint by verifying consistency +// against a newly fetched Checkpoint. +// nolint +func VerifyCurrentCheckpoint(ctx context.Context, rClient *client.Rekor, verifier signature.Verifier, + oldSTH *util.SignedCheckpoint) (*util.SignedCheckpoint, error) { + // The oldSTH should already be verified, but check for robustness. + if !oldSTH.Verify(verifier) { + return nil, errors.New("signature on old tree head did not verify") + } + + // Get and verify against the current STH. + infoParams := tlog.NewGetLogInfoParamsWithContext(ctx) + result, err := rClient.Tlog.GetLogInfo(infoParams) + if err != nil { + return nil, err + } + + logInfo := result.GetPayload() + sth := util.SignedCheckpoint{} + if err := sth.UnmarshalText([]byte(*logInfo.SignedTreeHead)); err != nil { + return nil, err + } + + // Verify the signature on the SignedCheckpoint. + if !sth.Verify(verifier) { + return nil, errors.New("signature on tree head did not verify") + } + + // Now verify consistency up to the STH. + if err := ProveConsistency(ctx, rClient, oldSTH, &sth, *logInfo.TreeID); err != nil { + return nil, err + } + return &sth, nil +} + +// VerifyCheckpointSignature verifies the signature on a checkpoint (signed tree head). It does +// not verify consistency against other checkpoints. 
+// nolint +func VerifyCheckpointSignature(e *models.LogEntryAnon, verifier signature.Verifier) error { + sth := &util.SignedCheckpoint{} + if err := sth.UnmarshalText([]byte(*e.Verification.InclusionProof.Checkpoint)); err != nil { + return fmt.Errorf("unmarshalling log entry checkpoint to SignedCheckpoint: %w", err) + } + if !sth.Verify(verifier) { + return errors.New("signature on checkpoint did not verify") + } + rootHash, err := hex.DecodeString(*e.Verification.InclusionProof.RootHash) + if err != nil { + return errors.New("decoding inclusion proof root hash") + } + + if !bytes.EqualFold(rootHash, sth.Hash) { + return fmt.Errorf("proof root hash does not match signed tree head, expected %s got %s", + *e.Verification.InclusionProof.RootHash, + hex.EncodeToString(sth.Hash)) + } + return nil +} + +// VerifyInclusion verifies an entry's inclusion proof. Clients MUST either verify +// the root hash against a new STH (via VerifyCurrentCheckpoint) or against a +// trusted, existing STH (via ProveConsistency). +// nolint +func VerifyInclusion(ctx context.Context, e *models.LogEntryAnon) error { + if e.Verification == nil || e.Verification.InclusionProof == nil { + return errors.New("inclusion proof not provided") + } + + hashes := [][]byte{} + for _, h := range e.Verification.InclusionProof.Hashes { + hb, _ := hex.DecodeString(h) + hashes = append(hashes, hb) + } + + rootHash, err := hex.DecodeString(*e.Verification.InclusionProof.RootHash) + if err != nil { + return err + } + + // Verify the inclusion proof. + entryBytes, err := base64.StdEncoding.DecodeString(e.Body.(string)) + if err != nil { + return err + } + leafHash := rfc6962.DefaultHasher.HashLeaf(entryBytes) + + if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(*e.Verification.InclusionProof.LogIndex), + uint64(*e.Verification.InclusionProof.TreeSize), leafHash, hashes, rootHash); err != nil { // nolint: gosec + return err + } + + return nil +} + +// VerifySignedEntryTimestamp verifies the entry's SET against the provided +// public key. +// nolint +func VerifySignedEntryTimestamp(ctx context.Context, e *models.LogEntryAnon, verifier signature.Verifier) error { + if e.Verification == nil { + return errors.New("missing verification") + } + if e.Verification.SignedEntryTimestamp == nil { + return errors.New("signature missing") + } + + type bundle struct { + Body interface{} `json:"body"` + IntegratedTime int64 `json:"integratedTime"` + // Note that this is the virtual index. + LogIndex int64 `json:"logIndex"` + LogID string `json:"logID"` + } + bundlePayload := bundle{ + Body: e.Body, + IntegratedTime: *e.IntegratedTime, + LogIndex: *e.LogIndex, + LogID: *e.LogID, + } + contents, err := json.Marshal(bundlePayload) + if err != nil { + return fmt.Errorf("marshaling bundle: %w", err) + } + canonicalized, err := jsoncanonicalizer.Transform(contents) + if err != nil { + return fmt.Errorf("canonicalizing bundle: %w", err) + } + + // verify the SET against the public key + if err := verifier.VerifySignature(bytes.NewReader(e.Verification.SignedEntryTimestamp), + bytes.NewReader(canonicalized), options.WithContext(ctx)); err != nil { + return fmt.Errorf("unable to verify bundle: %w", err) + } + return nil +} + +// VerifyLogEntry performs verification of a LogEntry given a Rekor verifier. +// Performs inclusion proof verification up to a verified root hash, +// SignedEntryTimestamp verification, and checkpoint verification. 
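Finally, a hedged sketch of calling the entry-level verifier from outside the package (verifyFetchedEntry and the package name are illustrative; it assumes pub holds Rekor's public verification key, obtained out of band, and that entry came from the Rekor API):

```go
// Sketch: an external caller wiring a verifier and checking a fetched entry.
package rekorexample

import (
	"context"
	"crypto"

	"github.com/sigstore/rekor/pkg/generated/models"
	"github.com/sigstore/rekor/pkg/verify"
	"github.com/sigstore/sigstore/pkg/signature"
)

func verifyFetchedEntry(ctx context.Context, entry *models.LogEntryAnon, pub crypto.PublicKey) error {
	v, err := signature.LoadVerifier(pub, crypto.SHA256)
	if err != nil {
		return err
	}
	// Runs inclusion, checkpoint, and SET verification in one call.
	return verify.VerifyLogEntry(ctx, entry, v)
}
```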
+// nolint +func VerifyLogEntry(ctx context.Context, e *models.LogEntryAnon, verifier signature.Verifier) error { + // Verify the inclusion proof using the body's leaf hash. + if err := VerifyInclusion(ctx, e); err != nil { + return err + } + + // Verify checkpoint, which includes a signed root hash. + if err := VerifyCheckpointSignature(e, verifier); err != nil { + return err + } + + // Verify the Signed Entry Timestamp. + if err := VerifySignedEntryTimestamp(ctx, e, verifier); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt b/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt new file mode 100644 index 00000000000..406ee260604 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt @@ -0,0 +1,13 @@ +Copyright 2023 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/sigstore/sigstore-go/LICENSE b/vendor/github.com/sigstore/sigstore-go/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/bundle/bundle.go b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/bundle.go new file mode 100644 index 00000000000..5919230e8a3 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/bundle.go @@ -0,0 +1,408 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
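+
+// A minimal usage sketch of this package's bundle API (illustrative, not
+// part of the upstream source; the path is a placeholder), assuming a
+// standard Sigstore bundle JSON file on disk:
+//
+//	b, err := bundle.LoadJSONFromPath("bundle.sigstore.json") // parses and validates
+//	if err != nil {
+//		// the file was unreadable, or the bundle failed validation
+//	}
+//	version, _ := b.Version()     // e.g. "v0.3"
+//	entries, _ := b.TlogEntries() // parsed transparency log entries
+//	_ = entries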
+ +package bundle + +import ( + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "os" + "strings" + + "github.com/secure-systems-lab/go-securesystemslib/dsse" + protobundle "github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1" + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + protodsse "github.com/sigstore/protobuf-specs/gen/pb-go/dsse" + "golang.org/x/mod/semver" + "google.golang.org/protobuf/encoding/protojson" + + "github.com/sigstore/sigstore-go/pkg/tlog" + "github.com/sigstore/sigstore-go/pkg/verify" +) + +var ErrValidation = errors.New("validation error") +var ErrUnsupportedMediaType = fmt.Errorf("%w: unsupported media type", ErrValidation) +var ErrEmptyBundle = fmt.Errorf("%w: empty protobuf bundle", ErrValidation) +var ErrMissingVerificationMaterial = fmt.Errorf("%w: missing verification material", ErrValidation) +var ErrMissingBundleContent = fmt.Errorf("%w: missing bundle content", ErrValidation) +var ErrUnimplemented = errors.New("unimplemented") +var ErrInvalidAttestation = fmt.Errorf("%w: invalid attestation", ErrValidation) +var ErrMissingEnvelope = fmt.Errorf("%w: missing valid envelope", ErrInvalidAttestation) +var ErrDecodingJSON = fmt.Errorf("%w: decoding json", ErrInvalidAttestation) +var ErrDecodingB64 = fmt.Errorf("%w: decoding base64", ErrInvalidAttestation) + +const mediaTypeBase = "application/vnd.dev.sigstore.bundle" + +func ErrValidationError(err error) error { + return fmt.Errorf("%w: %w", ErrValidation, err) +} + +type Bundle struct { + *protobundle.Bundle + hasInclusionPromise bool + hasInclusionProof bool +} + +func NewBundle(pbundle *protobundle.Bundle) (*Bundle, error) { + bundle := &Bundle{ + Bundle: pbundle, + hasInclusionPromise: false, + hasInclusionProof: false, + } + + err := bundle.validate() + if err != nil { + return nil, err + } + + return bundle, nil +} + +// Deprecated: use Bundle instead +type ProtobufBundle = Bundle + +// Deprecated: use NewBundle instead +func NewProtobufBundle(b *protobundle.Bundle) (*ProtobufBundle, error) { + return NewBundle(b) +} + +func (b *Bundle) validate() error { + bundleVersion, err := b.Version() + if err != nil { + return fmt.Errorf("error getting bundle version: %w", err) + } + + // if bundle version is < 0.1, return error + if semver.Compare(bundleVersion, "v0.1") < 0 { + return fmt.Errorf("%w: bundle version %s is not supported", ErrUnsupportedMediaType, bundleVersion) + } + + // fetch tlog entries, as next check needs to check them for inclusion proof/promise + entries, err := b.TlogEntries() + if err != nil { + return err + } + + // if bundle version == v0.1, require inclusion promise + if semver.Compare(bundleVersion, "v0.1") == 0 { + if len(entries) > 0 && !b.hasInclusionPromise { + return errors.New("inclusion promises missing in bundle (required for bundle v0.1)") + } + } else { + // if bundle version >= v0.2, require inclusion proof + if len(entries) > 0 && !b.hasInclusionProof { + return errors.New("inclusion proof missing in bundle (required for bundle v0.2)") + } + } + + // if bundle version >= v0.3, require verification material to not be X.509 certificate chain (only single certificate is allowed) + if semver.Compare(bundleVersion, "v0.3") >= 0 { + certs := b.VerificationMaterial.GetX509CertificateChain() + + if certs != nil { + return errors.New("verification material cannot be X.509 certificate chain (for bundle v0.3)") + } + } + + // if bundle version is >= v0.4, return error as this version is not supported + if semver.Compare(bundleVersion, "v0.4") >= 0 { + 
return fmt.Errorf("%w: bundle version %s is not yet supported", ErrUnsupportedMediaType, bundleVersion) + } + + err = validateBundle(b.Bundle) + if err != nil { + return fmt.Errorf("invalid bundle: %w", err) + } + return nil +} + +// MediaTypeString returns a mediatype string for the specified bundle version. +// The function returns an error if the resulting string does validate. +func MediaTypeString(version string) (string, error) { + if version == "" { + return "", fmt.Errorf("unable to build media type string, no version defined") + } + + var mtString string + + version = strings.TrimPrefix(version, "v") + mtString = fmt.Sprintf("%s.v%s+json", mediaTypeBase, strings.TrimPrefix(version, "v")) + + if version == "0.1" || version == "0.2" { + mtString = fmt.Sprintf("%s+json;version=%s", mediaTypeBase, strings.TrimPrefix(version, "v")) + } + + if _, err := getBundleVersion(mtString); err != nil { + return "", fmt.Errorf("unable to build mediatype: %w", err) + } + + return mtString, nil +} + +func (b *Bundle) Version() (string, error) { + return getBundleVersion(b.MediaType) +} + +func getBundleVersion(mediaType string) (string, error) { + switch mediaType { + case mediaTypeBase + "+json;version=0.1": + return "v0.1", nil + case mediaTypeBase + "+json;version=0.2": + return "v0.2", nil + case mediaTypeBase + "+json;version=0.3": + return "v0.3", nil + } + if strings.HasPrefix(mediaType, mediaTypeBase+".v") && strings.HasSuffix(mediaType, "+json") { + version := strings.TrimPrefix(mediaType, mediaTypeBase+".") + version = strings.TrimSuffix(version, "+json") + if semver.IsValid(version) { + return version, nil + } + return "", fmt.Errorf("%w: invalid bundle version: %s", ErrUnsupportedMediaType, version) + } + return "", fmt.Errorf("%w: %s", ErrUnsupportedMediaType, mediaType) +} + +func validateBundle(b *protobundle.Bundle) error { + if b == nil { + return ErrEmptyBundle + } + + if b.Content == nil { + return ErrMissingBundleContent + } + + switch b.Content.(type) { + case *protobundle.Bundle_DsseEnvelope, *protobundle.Bundle_MessageSignature: + default: + return fmt.Errorf("invalid bundle content: bundle content must be either a message signature or dsse envelope") + } + + if b.VerificationMaterial == nil || b.VerificationMaterial.Content == nil { + return ErrMissingVerificationMaterial + } + + switch b.VerificationMaterial.Content.(type) { + case *protobundle.VerificationMaterial_PublicKey, *protobundle.VerificationMaterial_Certificate, *protobundle.VerificationMaterial_X509CertificateChain: + default: + return fmt.Errorf("invalid verification material content: verification material must be one of public key, x509 certificate and x509 certificate chain") + } + + return nil +} + +func LoadJSONFromPath(path string) (*Bundle, error) { + var bundle Bundle + bundle.Bundle = new(protobundle.Bundle) + + contents, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + err = bundle.UnmarshalJSON(contents) + if err != nil { + return nil, err + } + + return &bundle, nil +} + +func (b *Bundle) MarshalJSON() ([]byte, error) { + return protojson.Marshal(b.Bundle) +} + +func (b *Bundle) UnmarshalJSON(data []byte) error { + b.Bundle = new(protobundle.Bundle) + err := protojson.Unmarshal(data, b.Bundle) + if err != nil { + return err + } + + err = b.validate() + if err != nil { + return err + } + + return nil +} + +func (b *Bundle) VerificationContent() (verify.VerificationContent, error) { + if b.VerificationMaterial == nil { + return nil, ErrMissingVerificationMaterial + } + + switch 
content := b.VerificationMaterial.GetContent().(type) { + case *protobundle.VerificationMaterial_X509CertificateChain: + if content.X509CertificateChain == nil { + return nil, ErrMissingVerificationMaterial + } + certs := content.X509CertificateChain.GetCertificates() + if len(certs) == 0 || certs[0].RawBytes == nil { + return nil, ErrMissingVerificationMaterial + } + parsedCert, err := x509.ParseCertificate(certs[0].RawBytes) + if err != nil { + return nil, ErrValidationError(err) + } + cert := &Certificate{ + certificate: parsedCert, + } + return cert, nil + case *protobundle.VerificationMaterial_Certificate: + if content.Certificate == nil || content.Certificate.RawBytes == nil { + return nil, ErrMissingVerificationMaterial + } + parsedCert, err := x509.ParseCertificate(content.Certificate.RawBytes) + if err != nil { + return nil, ErrValidationError(err) + } + cert := &Certificate{ + certificate: parsedCert, + } + return cert, nil + case *protobundle.VerificationMaterial_PublicKey: + if content.PublicKey == nil { + return nil, ErrMissingVerificationMaterial + } + pk := &PublicKey{ + hint: content.PublicKey.Hint, + } + return pk, nil + + default: + return nil, ErrMissingVerificationMaterial + } +} + +func (b *Bundle) HasInclusionPromise() bool { + return b.hasInclusionPromise +} + +func (b *Bundle) HasInclusionProof() bool { + return b.hasInclusionProof +} + +func (b *Bundle) TlogEntries() ([]*tlog.Entry, error) { + if b.VerificationMaterial == nil { + return nil, nil + } + + tlogEntries := make([]*tlog.Entry, len(b.VerificationMaterial.TlogEntries)) + var err error + for i, entry := range b.VerificationMaterial.TlogEntries { + tlogEntries[i], err = tlog.ParseTransparencyLogEntry(entry) + if err != nil { + return nil, ErrValidationError(err) + } + + if tlogEntries[i].HasInclusionPromise() { + b.hasInclusionPromise = true + } + if tlogEntries[i].HasInclusionProof() { + b.hasInclusionProof = true + } + } + + return tlogEntries, nil +} + +func (b *Bundle) SignatureContent() (verify.SignatureContent, error) { + switch content := b.Content.(type) { //nolint:gocritic + case *protobundle.Bundle_DsseEnvelope: + envelope, err := parseEnvelope(content.DsseEnvelope) + if err != nil { + return nil, err + } + return envelope, nil + case *protobundle.Bundle_MessageSignature: + if content.MessageSignature == nil || content.MessageSignature.MessageDigest == nil { + return nil, ErrMissingVerificationMaterial + } + return NewMessageSignature( + content.MessageSignature.MessageDigest.Digest, + protocommon.HashAlgorithm_name[int32(content.MessageSignature.MessageDigest.Algorithm)], + content.MessageSignature.Signature, + ), nil + } + return nil, ErrMissingVerificationMaterial +} + +func (b *Bundle) Envelope() (*Envelope, error) { + switch content := b.Content.(type) { //nolint:gocritic + case *protobundle.Bundle_DsseEnvelope: + envelope, err := parseEnvelope(content.DsseEnvelope) + if err != nil { + return nil, err + } + return envelope, nil + } + return nil, ErrMissingVerificationMaterial +} + +func (b *Bundle) Timestamps() ([][]byte, error) { + if b.VerificationMaterial == nil { + return nil, ErrMissingVerificationMaterial + } + + signedTimestamps := make([][]byte, 0) + + if b.VerificationMaterial.TimestampVerificationData == nil { + return signedTimestamps, nil + } + + for _, timestamp := range b.VerificationMaterial.TimestampVerificationData.Rfc3161Timestamps { + signedTimestamps = append(signedTimestamps, timestamp.SignedTimestamp) + } + + return signedTimestamps, nil +} + +// MinVersion returns true 
if the bundle version is greater than or equal to the expected version. +func (b *Bundle) MinVersion(expectVersion string) bool { + version, err := b.Version() + if err != nil { + return false + } + + if !strings.HasPrefix(expectVersion, "v") { + expectVersion = "v" + expectVersion + } + + return semver.Compare(version, expectVersion) >= 0 +} + +func parseEnvelope(input *protodsse.Envelope) (*Envelope, error) { + if input == nil { + return nil, ErrMissingEnvelope + } + output := &dsse.Envelope{} + payload := input.GetPayload() + if payload == nil { + return nil, ErrMissingEnvelope + } + output.Payload = base64.StdEncoding.EncodeToString([]byte(payload)) + output.PayloadType = string(input.GetPayloadType()) + output.Signatures = make([]dsse.Signature, len(input.GetSignatures())) + for i, sig := range input.GetSignatures() { + if sig == nil { + return nil, ErrMissingEnvelope + } + output.Signatures[i].KeyID = sig.GetKeyid() + output.Signatures[i].Sig = base64.StdEncoding.EncodeToString(sig.GetSig()) + } + return &Envelope{Envelope: output}, nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/bundle/signature_content.go b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/signature_content.go new file mode 100644 index 00000000000..b3e6dd69e6c --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/signature_content.go @@ -0,0 +1,106 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
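+
+// A hedged usage sketch (env is an assumed *Envelope taken from a bundle;
+// illustrative, not an upstream API addition): decode the DSSE payload into
+// an in-toto statement.
+//
+//	stmt, err := env.Statement()
+//	if err != nil {
+//		// payload was not in-toto JSON, or base64/JSON decoding failed
+//	}
+//	_ = stmt.PredicateType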
+ +package bundle + +import ( + "encoding/base64" + + in_toto "github.com/in-toto/attestation/go/v1" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/sigstore/sigstore-go/pkg/verify" + "google.golang.org/protobuf/encoding/protojson" +) + +const IntotoMediaType = "application/vnd.in-toto+json" + +type MessageSignature struct { + digest []byte + digestAlgorithm string + signature []byte +} + +func (m *MessageSignature) Digest() []byte { + return m.digest +} + +func (m *MessageSignature) DigestAlgorithm() string { + return m.digestAlgorithm +} + +func NewMessageSignature(digest []byte, digestAlgorithm string, signature []byte) *MessageSignature { + return &MessageSignature{ + digest: digest, + digestAlgorithm: digestAlgorithm, + signature: signature, + } +} + +type Envelope struct { + *dsse.Envelope +} + +func (e *Envelope) Statement() (*in_toto.Statement, error) { + if e.PayloadType != IntotoMediaType { + return nil, ErrUnsupportedMediaType + } + + var statement in_toto.Statement + raw, err := e.DecodeB64Payload() + if err != nil { + return nil, ErrDecodingB64 + } + err = protojson.Unmarshal(raw, &statement) + if err != nil { + return nil, ErrDecodingJSON + } + return &statement, nil +} + +func (e *Envelope) EnvelopeContent() verify.EnvelopeContent { + return e +} + +func (e *Envelope) RawEnvelope() *dsse.Envelope { + return e.Envelope +} + +func (m *MessageSignature) EnvelopeContent() verify.EnvelopeContent { + return nil +} + +func (e *Envelope) MessageSignatureContent() verify.MessageSignatureContent { + return nil +} + +func (m *MessageSignature) MessageSignatureContent() verify.MessageSignatureContent { + return m +} + +func (m *MessageSignature) Signature() []byte { + return m.signature +} + +func (e *Envelope) Signature() []byte { + if len(e.Signatures) == 0 { + return []byte{} + } + + sigBytes, err := base64.StdEncoding.DecodeString(e.Signatures[0].Sig) + if err != nil { + return []byte{} + } + + return sigBytes +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/bundle/verification_content.go b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/verification_content.go new file mode 100644 index 00000000000..86e1e0bbc0c --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/verification_content.go @@ -0,0 +1,92 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
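+
+// A minimal sketch (cert is an assumed *x509.Certificate; the trusted
+// material argument is unused for certificates, so nil is passed here):
+//
+//	vc := NewCertificate(cert)
+//	ok := vc.ValidAtTime(time.Now(), nil) // true iff NotBefore <= now <= NotAfter
+//	_ = ok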
+ +package bundle + +import ( + "crypto" + "crypto/x509" + "time" + + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/verify" +) + +type Certificate struct { + certificate *x509.Certificate +} + +func NewCertificate(cert *x509.Certificate) *Certificate { + return &Certificate{certificate: cert} +} + +type PublicKey struct { + hint string +} + +func (pk PublicKey) Hint() string { + return pk.hint +} + +func (c *Certificate) CompareKey(key any, _ root.TrustedMaterial) bool { + x509Key, ok := key.(*x509.Certificate) + if !ok { + return false + } + + return c.certificate.Equal(x509Key) +} + +func (c *Certificate) ValidAtTime(t time.Time, _ root.TrustedMaterial) bool { + return !c.certificate.NotAfter.Before(t) && !c.certificate.NotBefore.After(t) +} + +func (c *Certificate) Certificate() *x509.Certificate { + return c.certificate +} + +func (c *Certificate) PublicKey() verify.PublicKeyProvider { + return nil +} + +func (pk *PublicKey) CompareKey(key any, tm root.TrustedMaterial) bool { + verifier, err := tm.PublicKeyVerifier(pk.hint) + if err != nil { + return false + } + pubKey, err := verifier.PublicKey() + if err != nil { + return false + } + if equaler, ok := key.(interface{ Equal(x crypto.PublicKey) bool }); ok { + return equaler.Equal(pubKey) + } + return false +} + +func (pk *PublicKey) ValidAtTime(t time.Time, tm root.TrustedMaterial) bool { + verifier, err := tm.PublicKeyVerifier(pk.hint) + if err != nil { + return false + } + return verifier.ValidAtTime(t) +} + +func (pk *PublicKey) Certificate() *x509.Certificate { + return nil +} + +func (pk *PublicKey) PublicKey() verify.PublicKeyProvider { + return pk +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go new file mode 100644 index 00000000000..e9a0680d162 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go @@ -0,0 +1,239 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
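+
+// A hedged usage sketch (cert is an assumed Fulcio-issued *x509.Certificate):
+//
+//	exts, err := certificate.ParseExtensions(cert.Extensions)
+//	if err != nil {
+//		// one of the extension values held malformed DER
+//	}
+//	_ = exts.Issuer // OIDC issuer, from OID ...57264.1.8 (or the deprecated ...57264.1.1)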
+
+// This file is a verbatim copy of https://github.com/sigstore/fulcio/blob/3707d80bb25330bc7ffbd9702fb401cd643e36fa/pkg/certificate/extensions.go ,
+// EXCEPT:
+// - the parseExtensions func has been renamed ParseExtensions
+
+package certificate
+
+import (
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+)
+
+var (
+	// Deprecated: Use OIDIssuerV2
+	OIDIssuer = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1}
+	// Deprecated: Use OIDBuildTrigger
+	OIDGitHubWorkflowTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 2}
+	// Deprecated: Use OIDSourceRepositoryDigest
+	OIDGitHubWorkflowSHA = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 3}
+	// Deprecated: Use OIDBuildConfigURI or OIDBuildConfigDigest
+	OIDGitHubWorkflowName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 4}
+	// Deprecated: Use SourceRepositoryURI
+	OIDGitHubWorkflowRepository = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 5}
+	// Deprecated: Use OIDSourceRepositoryRef
+	OIDGitHubWorkflowRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 6}
+
+	OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7}
+	OIDIssuerV2  = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 8}
+
+	// CI extensions
+	OIDBuildSignerURI                      = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 9}
+	OIDBuildSignerDigest                   = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 10}
+	OIDRunnerEnvironment                   = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 11}
+	OIDSourceRepositoryURI                 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 12}
+	OIDSourceRepositoryDigest              = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 13}
+	OIDSourceRepositoryRef                 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 14}
+	OIDSourceRepositoryIdentifier          = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 15}
+	OIDSourceRepositoryOwnerURI            = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 16}
+	OIDSourceRepositoryOwnerIdentifier     = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 17}
+	OIDBuildConfigURI                      = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 18}
+	OIDBuildConfigDigest                   = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 19}
+	OIDBuildTrigger                        = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 20}
+	OIDRunInvocationURI                    = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 21}
+	OIDSourceRepositoryVisibilityAtSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 22}
+)
+
+// Extensions contains all custom x509 extensions defined by Fulcio
+type Extensions struct {
+	// NB: New extensions must be added here and documented
+	// at docs/oidc-info.md
+
+	// The OIDC issuer. Should match `iss` claim of ID token or, in the case of
+	// a federated login like Dex it should match the issuer URL of the
+	// upstream issuer. If the issuer is not set, the extensions are invalid
+	// and will fail to render.
+	Issuer string `json:"issuer,omitempty"` // OID 1.3.6.1.4.1.57264.1.8 and 1.3.6.1.4.1.57264.1.1 (Deprecated)
+
+	// Deprecated
+	// Triggering event of the Github Workflow. Matches the `event_name` claim of ID
+	// tokens from Github Actions
+	GithubWorkflowTrigger string `json:"githubWorkflowTrigger,omitempty"` // OID 1.3.6.1.4.1.57264.1.2
+
+	// Deprecated
+	// SHA of git commit being built in Github Actions. Matches the `sha` claim of ID
+	// tokens from Github Actions
+	GithubWorkflowSHA string `json:"githubWorkflowSHA,omitempty"` //nolint:tagliatelle // OID 1.3.6.1.4.1.57264.1.3
+
+	// Deprecated
+	// Name of Github Actions Workflow.
Matches the `workflow` claim of the ID + // tokens from Github Actions + GithubWorkflowName string `json:"githubWorkflowName,omitempty"` // OID 1.3.6.1.4.1.57264.1.4 + + // Deprecated + // Repository of the Github Actions Workflow. Matches the `repository` claim of the ID + // tokens from Github Actions + GithubWorkflowRepository string `json:"githubWorkflowRepository,omitempty"` // OID 1.3.6.1.4.1.57264.1.5 + + // Deprecated + // Git Ref of the Github Actions Workflow. Matches the `ref` claim of the ID tokens + // from Github Actions + GithubWorkflowRef string `json:"githubWorkflowRef,omitempty"` // 1.3.6.1.4.1.57264.1.6 + + // Reference to specific build instructions that are responsible for signing. + BuildSignerURI string `json:"buildSignerURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.9 + + // Immutable reference to the specific version of the build instructions that is responsible for signing. + BuildSignerDigest string `json:"buildSignerDigest,omitempty"` // 1.3.6.1.4.1.57264.1.10 + + // Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure. + RunnerEnvironment string `json:"runnerEnvironment,omitempty"` // 1.3.6.1.4.1.57264.1.11 + + // Source repository URL that the build was based on. + SourceRepositoryURI string `json:"sourceRepositoryURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.12 + + // Immutable reference to a specific version of the source code that the build was based upon. + SourceRepositoryDigest string `json:"sourceRepositoryDigest,omitempty"` // 1.3.6.1.4.1.57264.1.13 + + // Source Repository Ref that the build run was based upon. + SourceRepositoryRef string `json:"sourceRepositoryRef,omitempty"` // 1.3.6.1.4.1.57264.1.14 + + // Immutable identifier for the source repository the workflow was based upon. + SourceRepositoryIdentifier string `json:"sourceRepositoryIdentifier,omitempty"` // 1.3.6.1.4.1.57264.1.15 + + // Source repository owner URL of the owner of the source repository that the build was based on. + SourceRepositoryOwnerURI string `json:"sourceRepositoryOwnerURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.16 + + // Immutable identifier for the owner of the source repository that the workflow was based upon. + SourceRepositoryOwnerIdentifier string `json:"sourceRepositoryOwnerIdentifier,omitempty"` // 1.3.6.1.4.1.57264.1.17 + + // Build Config URL to the top-level/initiating build instructions. + BuildConfigURI string `json:"buildConfigURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.18 + + // Immutable reference to the specific version of the top-level/initiating build instructions. + BuildConfigDigest string `json:"buildConfigDigest,omitempty"` // 1.3.6.1.4.1.57264.1.19 + + // Event or action that initiated the build. + BuildTrigger string `json:"buildTrigger,omitempty"` // 1.3.6.1.4.1.57264.1.20 + + // Run Invocation URL to uniquely identify the build execution. + RunInvocationURI string `json:"runInvocationURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.21 + + // Source repository visibility at the time of signing the certificate. 
+ SourceRepositoryVisibilityAtSigning string `json:"sourceRepositoryVisibilityAtSigning,omitempty"` // 1.3.6.1.4.1.57264.1.22 +} + +func ParseExtensions(ext []pkix.Extension) (Extensions, error) { + out := Extensions{} + + for _, e := range ext { + switch { + // BEGIN: Deprecated + case e.Id.Equal(OIDIssuer): + out.Issuer = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowTrigger): + out.GithubWorkflowTrigger = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowSHA): + out.GithubWorkflowSHA = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowName): + out.GithubWorkflowName = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowRepository): + out.GithubWorkflowRepository = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowRef): + out.GithubWorkflowRef = string(e.Value) + // END: Deprecated + case e.Id.Equal(OIDIssuerV2): + if err := ParseDERString(e.Value, &out.Issuer); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildSignerURI): + if err := ParseDERString(e.Value, &out.BuildSignerURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildSignerDigest): + if err := ParseDERString(e.Value, &out.BuildSignerDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDRunnerEnvironment): + if err := ParseDERString(e.Value, &out.RunnerEnvironment); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryURI): + if err := ParseDERString(e.Value, &out.SourceRepositoryURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryDigest): + if err := ParseDERString(e.Value, &out.SourceRepositoryDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryRef): + if err := ParseDERString(e.Value, &out.SourceRepositoryRef); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryIdentifier): + if err := ParseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryOwnerURI): + if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryOwnerIdentifier): + if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildConfigURI): + if err := ParseDERString(e.Value, &out.BuildConfigURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildConfigDigest): + if err := ParseDERString(e.Value, &out.BuildConfigDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildTrigger): + if err := ParseDERString(e.Value, &out.BuildTrigger); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDRunInvocationURI): + if err := ParseDERString(e.Value, &out.RunInvocationURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryVisibilityAtSigning): + if err := ParseDERString(e.Value, &out.SourceRepositoryVisibilityAtSigning); err != nil { + return Extensions{}, err + } + } + } + + // We only ever return nil, but leaving error in place so that we can add + // more complex parsing of fields in a backwards compatible way if needed. + return out, nil +} + +// ParseDERString decodes a DER-encoded string and puts the value in parsedVal. +// Returns an error if the unmarshalling fails or if there are trailing bytes in the encoding. 
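+//
+// A short sketch (ext is an assumed pkix.Extension whose Value holds a
+// DER-encoded string):
+//
+//	var issuer string
+//	err := ParseDERString(ext.Value, &issuer)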
+func ParseDERString(val []byte, parsedVal *string) error {
+	rest, err := asn1.Unmarshal(val, parsedVal)
+	if err != nil {
+		return fmt.Errorf("unexpected error unmarshalling DER-encoded string: %w", err)
+	}
+	if len(rest) != 0 {
+		return errors.New("unexpected trailing bytes in DER-encoded string")
+	}
+	return nil
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go
new file mode 100644
index 00000000000..da9a6661a90
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go
@@ -0,0 +1,90 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package certificate
+
+import (
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"reflect"
+
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+type Summary struct {
+	CertificateIssuer      string `json:"certificateIssuer"`
+	SubjectAlternativeName string `json:"subjectAlternativeName"`
+	Extensions
+}
+
+type ErrCompareExtensions struct {
+	field    string
+	expected string
+	actual   string
+}
+
+func (e *ErrCompareExtensions) Error() string {
+	return fmt.Sprintf("expected %s to be \"%s\", got \"%s\"", e.field, e.expected, e.actual)
+}
+
+func SummarizeCertificate(cert *x509.Certificate) (Summary, error) {
+	extensions, err := ParseExtensions(cert.Extensions)
+
+	if err != nil {
+		return Summary{}, err
+	}
+
+	var san string
+
+	switch {
+	case len(cert.URIs) > 0:
+		san = cert.URIs[0].String()
+	case len(cert.EmailAddresses) > 0:
+		san = cert.EmailAddresses[0]
+	}
+	if san == "" {
+		san, _ = cryptoutils.UnmarshalOtherNameSAN(cert.Extensions)
+	}
+	if san == "" {
+		return Summary{}, errors.New("no Subject Alternative Name found")
+	}
+
+	return Summary{CertificateIssuer: cert.Issuer.String(), SubjectAlternativeName: san, Extensions: extensions}, nil
+}
+
+// CompareExtensions compares two Extensions structs and returns an error if
+// any of the values set in the expected struct are not equal. Empty fields in
+// the expectedExt struct are ignored.
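+//
+// A sketch with illustrative values; only Issuer is compared here, because
+// it is the only non-zero field in the expected struct:
+//
+//	expected := Extensions{Issuer: "https://token.actions.githubusercontent.com"}
+//	if err := CompareExtensions(expected, actual); err != nil {
+//		// actual.Issuer differed from the expected value
+//	}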
+func CompareExtensions(expectedExt, actualExt Extensions) error { + expExtValue := reflect.ValueOf(expectedExt) + actExtValue := reflect.ValueOf(actualExt) + + fields := reflect.VisibleFields(expExtValue.Type()) + for _, field := range fields { + expectedFieldVal := expExtValue.FieldByName(field.Name) + + // if the expected field is empty, skip it + if expectedFieldVal.IsValid() && !expectedFieldVal.IsZero() { + actualFieldVal := actExtValue.FieldByName(field.Name) + if actualFieldVal.IsValid() { + if expectedFieldVal.Interface() != actualFieldVal.Interface() { + return &ErrCompareExtensions{field.Name, fmt.Sprintf("%v", expectedFieldVal.Interface()), fmt.Sprintf("%v", actualFieldVal.Interface())} + } + } + } + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/certificate_authority.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/certificate_authority.go new file mode 100644 index 00000000000..5e1cb67cca2 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/certificate_authority.go @@ -0,0 +1,66 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package root + +import ( + "crypto/x509" + "errors" + "time" +) + +type CertificateAuthority interface { + Verify(cert *x509.Certificate, observerTimestamp time.Time) ([][]*x509.Certificate, error) +} + +type FulcioCertificateAuthority struct { + Root *x509.Certificate + Intermediates []*x509.Certificate + ValidityPeriodStart time.Time + ValidityPeriodEnd time.Time + URI string +} + +var _ CertificateAuthority = &FulcioCertificateAuthority{} + +func (ca *FulcioCertificateAuthority) Verify(cert *x509.Certificate, observerTimestamp time.Time) ([][]*x509.Certificate, error) { + if !ca.ValidityPeriodStart.IsZero() && observerTimestamp.Before(ca.ValidityPeriodStart) { + return nil, errors.New("certificate is not valid yet") + } + if !ca.ValidityPeriodEnd.IsZero() && observerTimestamp.After(ca.ValidityPeriodEnd) { + return nil, errors.New("certificate is no longer valid") + } + + rootCertPool := x509.NewCertPool() + rootCertPool.AddCert(ca.Root) + intermediateCertPool := x509.NewCertPool() + for _, cert := range ca.Intermediates { + intermediateCertPool.AddCert(cert) + } + + // From spec: + // > ## Certificate + // > For a signature with a given certificate to be considered valid, it must have a timestamp while every certificate in the chain up to the root is valid (the so-called “hybrid model” of certificate verification per Braun et al. (2013)). 
+ + opts := x509.VerifyOptions{ + CurrentTime: observerTimestamp, + Roots: rootCertPool, + Intermediates: intermediateCertPool, + KeyUsages: []x509.ExtKeyUsage{ + x509.ExtKeyUsageCodeSigning, + }, + } + + return cert.Verify(opts) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/signing_config.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/signing_config.go new file mode 100644 index 00000000000..a86fe0b2c36 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/signing_config.go @@ -0,0 +1,457 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package root + +import ( + "fmt" + "math/rand" + "os" + "slices" + "time" + + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1" + "github.com/sigstore/sigstore-go/pkg/tuf" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const SigningConfigMediaType02 = "application/vnd.dev.sigstore.signingconfig.v0.2+json" + +type SigningConfig struct { + signingConfig *prototrustroot.SigningConfig +} + +type Service struct { + URL string + MajorAPIVersion uint32 + ValidityPeriodStart time.Time + ValidityPeriodEnd time.Time + Operator string +} + +type ServiceConfiguration struct { + Selector prototrustroot.ServiceSelector + Count uint32 +} + +func NewService(s *prototrustroot.Service) Service { + validFor := s.GetValidFor() + + var start time.Time + if validFor.GetStart() != nil { + start = validFor.GetStart().AsTime() + } + + var end time.Time + if validFor.GetEnd() != nil { + end = validFor.GetEnd().AsTime() + } + + return Service{ + URL: s.GetUrl(), + MajorAPIVersion: s.GetMajorApiVersion(), + ValidityPeriodStart: start, + ValidityPeriodEnd: end, + Operator: s.GetOperator(), + } +} + +// SelectService returns which service endpoint should be used based on supported API versions +// and current time. It will select the first service with the highest API version that matches +// the criteria. Services should be sorted from newest to oldest validity period start time, to +// minimize how far clients need to search to find a matching service. 
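+//
+// A sketch (the service list and API versions are illustrative):
+//
+//	svc, err := SelectService(services, []uint32{2, 1}, time.Now())
+//	// svc is the newest currently-valid service offering the highest
+//	// supported major API version (v2 preferred over v1 here)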
+func SelectService(services []Service, supportedAPIVersions []uint32, currentTime time.Time) (Service, error) { + if len(supportedAPIVersions) == 0 { + return Service{}, fmt.Errorf("no supported API versions") + } + + // Order supported versions from highest to lowest + sortedVersions := make([]uint32, len(supportedAPIVersions)) + copy(sortedVersions, supportedAPIVersions) + slices.Sort(sortedVersions) + slices.Reverse(sortedVersions) + + // Order services from newest to oldest + sortedServices := make([]Service, len(services)) + copy(sortedServices, services) + slices.SortFunc(sortedServices, func(i, j Service) int { + return i.ValidityPeriodStart.Compare(j.ValidityPeriodStart) + }) + slices.Reverse(sortedServices) + + for _, version := range sortedVersions { + for _, s := range sortedServices { + if version == s.MajorAPIVersion && s.ValidAtTime(currentTime) { + return s, nil + } + } + } + + return Service{}, fmt.Errorf("no matching service found for API versions %v and current time %v", supportedAPIVersions, currentTime) +} + +// SelectServices returns which service endpoints should be used based on supported API versions +// and current time. It will use the configuration's selector to pick a set of services. +// ALL will return all service endpoints, ANY will return a random endpoint, and +// EXACT will return a random selection of a specified number of endpoints. +// It will select services from the highest supported API versions and will not select +// services from different API versions. It will select distinct service operators, selecting +// at most one service per operator. +func SelectServices(services []Service, config ServiceConfiguration, supportedAPIVersions []uint32, currentTime time.Time) ([]Service, error) { + if len(supportedAPIVersions) == 0 { + return nil, fmt.Errorf("no supported API versions") + } + + // Order supported versions from highest to lowest + sortedVersions := make([]uint32, len(supportedAPIVersions)) + copy(sortedVersions, supportedAPIVersions) + slices.Sort(sortedVersions) + slices.Reverse(sortedVersions) + + // Order services from newest to oldest + sortedServices := make([]Service, len(services)) + copy(sortedServices, services) + slices.SortFunc(sortedServices, func(i, j Service) int { + return i.ValidityPeriodStart.Compare(j.ValidityPeriodStart) + }) + slices.Reverse(sortedServices) + + operators := make(map[string]bool) + var selectedServices []Service + for _, version := range sortedVersions { + for _, s := range sortedServices { + if version == s.MajorAPIVersion && s.ValidAtTime(currentTime) { + // Select the newest service for a given operator + if !operators[s.Operator] { + operators[s.Operator] = true + selectedServices = append(selectedServices, s) + } + } + } + // Exit once a list of services is found + if len(selectedServices) != 0 { + break + } + } + + if len(selectedServices) == 0 { + return nil, fmt.Errorf("no matching services found for API versions %v and current time %v", supportedAPIVersions, currentTime) + } + + // Select services from the highest supported API version + switch config.Selector { + case prototrustroot.ServiceSelector_ALL: + return selectedServices, nil + case prototrustroot.ServiceSelector_ANY: + i := rand.Intn(len(selectedServices)) // #nosec G404 + return []Service{selectedServices[i]}, nil + case prototrustroot.ServiceSelector_EXACT: + matchedUrls, err := selectExact(selectedServices, config.Count) + if err != nil { + return nil, err + } + return matchedUrls, nil + default: + return nil, fmt.Errorf("invalid 
service selector") + } +} + +func selectExact[T any](slice []T, count uint32) ([]T, error) { + if count == 0 { + return nil, fmt.Errorf("service selector count must be greater than 0") + } + if int(count) > len(slice) { + return nil, fmt.Errorf("service selector count %d must be less than or equal to the slice length %d", count, len(slice)) + } + sliceCopy := make([]T, len(slice)) + copy(sliceCopy, slice) + var result []T + for range count { + i := rand.Intn(len(sliceCopy)) // #nosec G404 + result = append(result, sliceCopy[i]) + // Remove element from slice + sliceCopy[i], sliceCopy[len(sliceCopy)-1] = sliceCopy[len(sliceCopy)-1], sliceCopy[i] + sliceCopy = sliceCopy[:len(sliceCopy)-1] + } + return result, nil +} + +func mapFunc[T, V any](ts []T, fn func(T) V) []V { + result := make([]V, len(ts)) + for i, t := range ts { + result[i] = fn(t) + } + return result +} + +func (s Service) ValidAtTime(t time.Time) bool { + if !s.ValidityPeriodStart.IsZero() && t.Before(s.ValidityPeriodStart) { + return false + } + if !s.ValidityPeriodEnd.IsZero() && t.After(s.ValidityPeriodEnd) { + return false + } + return true +} + +func (s Service) ToServiceProtobuf() *prototrustroot.Service { + tr := &v1.TimeRange{ + Start: timestamppb.New(s.ValidityPeriodStart), + } + if !s.ValidityPeriodEnd.IsZero() { + tr.End = timestamppb.New(s.ValidityPeriodEnd) + } + + return &prototrustroot.Service{ + Url: s.URL, + MajorApiVersion: s.MajorAPIVersion, + ValidFor: tr, + Operator: s.Operator, + } +} + +func (sc ServiceConfiguration) ToConfigProtobuf() *prototrustroot.ServiceConfiguration { + return &prototrustroot.ServiceConfiguration{ + Selector: sc.Selector, + Count: sc.Count, + } +} + +func (sc *SigningConfig) FulcioCertificateAuthorityURLs() []Service { + var services []Service + + for _, s := range sc.signingConfig.GetCaUrls() { + services = append(services, NewService(s)) + } + return services +} + +func (sc *SigningConfig) OIDCProviderURLs() []Service { + var services []Service + for _, s := range sc.signingConfig.GetOidcUrls() { + services = append(services, NewService(s)) + } + return services +} + +func (sc *SigningConfig) RekorLogURLs() []Service { + var services []Service + for _, s := range sc.signingConfig.GetRekorTlogUrls() { + services = append(services, NewService(s)) + } + return services +} + +func (sc *SigningConfig) RekorLogURLsConfig() ServiceConfiguration { + c := sc.signingConfig.GetRekorTlogConfig() + return ServiceConfiguration{ + Selector: c.Selector, + Count: c.Count, + } +} + +func (sc *SigningConfig) TimestampAuthorityURLs() []Service { + var services []Service + for _, s := range sc.signingConfig.GetTsaUrls() { + services = append(services, NewService(s)) + } + return services +} + +func (sc *SigningConfig) TimestampAuthorityURLsConfig() ServiceConfiguration { + c := sc.signingConfig.GetTsaConfig() + return ServiceConfiguration{ + Selector: c.Selector, + Count: c.Count, + } +} + +func (sc *SigningConfig) WithFulcioCertificateAuthorityURLs(fulcioURLs ...Service) *SigningConfig { + var services []*prototrustroot.Service + for _, u := range fulcioURLs { + services = append(services, u.ToServiceProtobuf()) + } + sc.signingConfig.CaUrls = services + return sc +} + +func (sc *SigningConfig) AddFulcioCertificateAuthorityURLs(fulcioURLs ...Service) *SigningConfig { + for _, u := range fulcioURLs { + sc.signingConfig.CaUrls = append(sc.signingConfig.CaUrls, u.ToServiceProtobuf()) + } + return sc +} + +func (sc *SigningConfig) WithOIDCProviderURLs(oidcURLs ...Service) *SigningConfig { + var services 
[]*prototrustroot.Service + for _, u := range oidcURLs { + services = append(services, u.ToServiceProtobuf()) + } + sc.signingConfig.OidcUrls = services + return sc +} + +func (sc *SigningConfig) AddOIDCProviderURLs(oidcURLs ...Service) *SigningConfig { + for _, u := range oidcURLs { + sc.signingConfig.OidcUrls = append(sc.signingConfig.OidcUrls, u.ToServiceProtobuf()) + } + return sc +} + +func (sc *SigningConfig) WithRekorLogURLs(logURLs ...Service) *SigningConfig { + var services []*prototrustroot.Service + for _, u := range logURLs { + services = append(services, u.ToServiceProtobuf()) + } + sc.signingConfig.RekorTlogUrls = services + return sc +} + +func (sc *SigningConfig) AddRekorLogURLs(logURLs ...Service) *SigningConfig { + for _, u := range logURLs { + sc.signingConfig.RekorTlogUrls = append(sc.signingConfig.RekorTlogUrls, u.ToServiceProtobuf()) + } + return sc +} + +func (sc *SigningConfig) WithRekorTlogConfig(selector prototrustroot.ServiceSelector, count uint32) *SigningConfig { + sc.signingConfig.RekorTlogConfig.Selector = selector + sc.signingConfig.RekorTlogConfig.Count = count + return sc +} + +func (sc *SigningConfig) WithTimestampAuthorityURLs(tsaURLs ...Service) *SigningConfig { + var services []*prototrustroot.Service + for _, u := range tsaURLs { + services = append(services, u.ToServiceProtobuf()) + } + sc.signingConfig.TsaUrls = services + return sc +} + +func (sc *SigningConfig) AddTimestampAuthorityURLs(tsaURLs ...Service) *SigningConfig { + for _, u := range tsaURLs { + sc.signingConfig.TsaUrls = append(sc.signingConfig.TsaUrls, u.ToServiceProtobuf()) + } + return sc +} + +func (sc *SigningConfig) WithTsaConfig(selector prototrustroot.ServiceSelector, count uint32) *SigningConfig { + sc.signingConfig.TsaConfig.Selector = selector + sc.signingConfig.TsaConfig.Count = count + return sc +} + +func (sc SigningConfig) String() string { + return fmt.Sprintf("{CA: %v, OIDC: %v, RekorLogs: %v, TSAs: %v, MediaType: %s}", + sc.FulcioCertificateAuthorityURLs(), + sc.OIDCProviderURLs(), + sc.RekorLogURLs(), + sc.TimestampAuthorityURLs(), + SigningConfigMediaType02) +} + +func (sc SigningConfig) MarshalJSON() ([]byte, error) { + return protojson.Marshal(sc.signingConfig) +} + +// NewSigningConfig initializes a SigningConfig object from a mediaType string, Fulcio certificate +// authority URLs, OIDC provider URLs, Rekor transparency log URLs, timestamp authorities URLs, +// selection criteria for Rekor logs and TSAs. +func NewSigningConfig(mediaType string, + fulcioCertificateAuthorities []Service, + oidcProviders []Service, + rekorLogs []Service, + rekorLogsConfig ServiceConfiguration, + timestampAuthorities []Service, + timestampAuthoritiesConfig ServiceConfiguration) (*SigningConfig, error) { + if mediaType != SigningConfigMediaType02 { + return nil, fmt.Errorf("unsupported SigningConfig media type, must be: %s", SigningConfigMediaType02) + } + sc := &SigningConfig{ + signingConfig: &prototrustroot.SigningConfig{ + MediaType: mediaType, + CaUrls: mapFunc(fulcioCertificateAuthorities, Service.ToServiceProtobuf), + OidcUrls: mapFunc(oidcProviders, Service.ToServiceProtobuf), + RekorTlogUrls: mapFunc(rekorLogs, Service.ToServiceProtobuf), + RekorTlogConfig: rekorLogsConfig.ToConfigProtobuf(), + TsaUrls: mapFunc(timestampAuthorities, Service.ToServiceProtobuf), + TsaConfig: timestampAuthoritiesConfig.ToConfigProtobuf(), + }, + } + return sc, nil +} + +// NewSigningConfigFromProtobuf returns a Sigstore signing configuration. 
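+// It returns an error for any media type other than SigningConfigMediaType02.
+// A sketch (pbSC is an assumed *prototrustroot.SigningConfig):
+//
+//	sc, err := NewSigningConfigFromProtobuf(pbSC)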
+func NewSigningConfigFromProtobuf(sc *prototrustroot.SigningConfig) (*SigningConfig, error) {
+	if sc.GetMediaType() != SigningConfigMediaType02 {
+		return nil, fmt.Errorf("unsupported SigningConfig media type: %s", sc.GetMediaType())
+	}
+	return &SigningConfig{signingConfig: sc}, nil
+}
+
+// NewSigningConfigFromPath returns a Sigstore signing configuration from a file.
+func NewSigningConfigFromPath(path string) (*SigningConfig, error) {
+	scJSON, err := os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewSigningConfigFromJSON(scJSON)
+}
+
+// NewSigningConfigFromJSON returns a Sigstore signing configuration from JSON.
+func NewSigningConfigFromJSON(rootJSON []byte) (*SigningConfig, error) {
+	pbSC, err := NewSigningConfigProtobuf(rootJSON)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewSigningConfigFromProtobuf(pbSC)
+}
+
+// NewSigningConfigProtobuf returns a Sigstore signing configuration as a protobuf.
+func NewSigningConfigProtobuf(scJSON []byte) (*prototrustroot.SigningConfig, error) {
+	pbSC := &prototrustroot.SigningConfig{}
+	err := protojson.Unmarshal(scJSON, pbSC)
+	if err != nil {
+		return nil, err
+	}
+	return pbSC, nil
+}
+
+// FetchSigningConfig fetches the public-good Sigstore signing configuration from TUF.
+func FetchSigningConfig() (*SigningConfig, error) {
+	return FetchSigningConfigWithOptions(tuf.DefaultOptions())
+}
+
+// FetchSigningConfigWithOptions fetches the public-good Sigstore signing configuration with the given options from TUF.
+func FetchSigningConfigWithOptions(opts *tuf.Options) (*SigningConfig, error) {
+	client, err := tuf.New(opts)
+	if err != nil {
+		return nil, err
+	}
+	return GetSigningConfig(client)
+}
+
+// GetSigningConfig fetches the public-good Sigstore signing configuration target from TUF.
+func GetSigningConfig(c *tuf.Client) (*SigningConfig, error) {
+	jsonBytes, err := c.GetTarget("signing_config.v0.2.json")
+	if err != nil {
+		return nil, err
+	}
+	return NewSigningConfigFromJSON(jsonBytes)
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/timestamping_authority.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/timestamping_authority.go
new file mode 100644
index 00000000000..bde952cc116
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/timestamping_authority.go
@@ -0,0 +1,75 @@
+// Copyright 2024 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
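+
+// A hedged usage sketch (tsa is an assumed *SigstoreTimestampingAuthority
+// populated with a root certificate; ts and sig are raw bytes taken from a
+// bundle):
+//
+//	stamp, err := tsa.Verify(ts, sig)
+//	if err == nil {
+//		_ = stamp.Time // RFC 3161 time, already checked against the validity period
+//	}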
+
+package root
+
+import (
+	"bytes"
+	"crypto/x509"
+	"errors"
+	"time"
+
+	tsaverification "github.com/sigstore/timestamp-authority/v2/pkg/verification"
+)
+
+type Timestamp struct {
+	Time time.Time
+	URI  string
+}
+
+type TimestampingAuthority interface {
+	Verify(signedTimestamp []byte, signatureBytes []byte) (*Timestamp, error)
+}
+
+type SigstoreTimestampingAuthority struct {
+	Root                *x509.Certificate
+	Intermediates       []*x509.Certificate
+	Leaf                *x509.Certificate
+	ValidityPeriodStart time.Time
+	ValidityPeriodEnd   time.Time
+	URI                 string
+}
+
+var _ TimestampingAuthority = &SigstoreTimestampingAuthority{}
+
+func (tsa *SigstoreTimestampingAuthority) Verify(signedTimestamp []byte, signatureBytes []byte) (*Timestamp, error) {
+	if tsa.Root == nil {
+		var tsaURIDisplay string
+		if tsa.URI != "" {
+			tsaURIDisplay = tsa.URI + " "
+		}
+		return nil, errors.New("timestamping authority " + tsaURIDisplay + "root certificate is nil")
+	}
+	trustedRootVerificationOptions := tsaverification.VerifyOpts{
+		Roots:          []*x509.Certificate{tsa.Root},
+		Intermediates:  tsa.Intermediates,
+		TSACertificate: tsa.Leaf,
+	}
+
+	// Ensure timestamp responses are from trusted sources
+	timestamp, err := tsaverification.VerifyTimestampResponse(signedTimestamp, bytes.NewReader(signatureBytes), trustedRootVerificationOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	if !tsa.ValidityPeriodStart.IsZero() && timestamp.Time.Before(tsa.ValidityPeriodStart) {
+		return nil, errors.New("timestamp is before the validity period start")
+	}
+	if !tsa.ValidityPeriodEnd.IsZero() && timestamp.Time.After(tsa.ValidityPeriodEnd) {
+		return nil, errors.New("timestamp is after the validity period end")
+	}
+
+	// All verification above succeeded, so return the verified timestamp
+	return &Timestamp{Time: timestamp.Time, URI: tsa.URI}, nil
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go
new file mode 100644
index 00000000000..d1ec4d46189
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go
@@ -0,0 +1,172 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package root + +import ( + "fmt" + "time" + + "github.com/sigstore/sigstore/pkg/signature" +) + +type TrustedMaterial interface { + TimestampingAuthorities() []TimestampingAuthority + FulcioCertificateAuthorities() []CertificateAuthority + RekorLogs() map[string]*TransparencyLog + CTLogs() map[string]*TransparencyLog + PublicKeyVerifier(string) (TimeConstrainedVerifier, error) +} + +type BaseTrustedMaterial struct{} + +func (b *BaseTrustedMaterial) TimestampingAuthorities() []TimestampingAuthority { + return []TimestampingAuthority{} +} + +func (b *BaseTrustedMaterial) FulcioCertificateAuthorities() []CertificateAuthority { + return []CertificateAuthority{} +} + +func (b *BaseTrustedMaterial) RekorLogs() map[string]*TransparencyLog { + return map[string]*TransparencyLog{} +} + +func (b *BaseTrustedMaterial) CTLogs() map[string]*TransparencyLog { + return map[string]*TransparencyLog{} +} + +func (b *BaseTrustedMaterial) PublicKeyVerifier(_ string) (TimeConstrainedVerifier, error) { + return nil, fmt.Errorf("public key verifier not found") +} + +type TrustedMaterialCollection []TrustedMaterial + +// Ensure types implement interfaces +var _ TrustedMaterial = &BaseTrustedMaterial{} +var _ TrustedMaterial = TrustedMaterialCollection{} + +func (tmc TrustedMaterialCollection) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) { + for _, tm := range tmc { + verifier, err := tm.PublicKeyVerifier(keyID) + if err == nil { + return verifier, nil + } + } + return nil, fmt.Errorf("public key verifier not found for keyID: %s", keyID) +} + +func (tmc TrustedMaterialCollection) TimestampingAuthorities() []TimestampingAuthority { + var timestampingAuthorities []TimestampingAuthority + for _, tm := range tmc { + timestampingAuthorities = append(timestampingAuthorities, tm.TimestampingAuthorities()...) + } + return timestampingAuthorities +} + +func (tmc TrustedMaterialCollection) FulcioCertificateAuthorities() []CertificateAuthority { + var certAuthorities []CertificateAuthority + for _, tm := range tmc { + certAuthorities = append(certAuthorities, tm.FulcioCertificateAuthorities()...) + } + return certAuthorities +} + +func (tmc TrustedMaterialCollection) RekorLogs() map[string]*TransparencyLog { + rekorLogs := make(map[string]*TransparencyLog) + for _, tm := range tmc { + for keyID, tlogVerifier := range tm.RekorLogs() { + rekorLogs[keyID] = tlogVerifier + } + } + return rekorLogs +} + +func (tmc TrustedMaterialCollection) CTLogs() map[string]*TransparencyLog { + rekorLogs := make(map[string]*TransparencyLog) + for _, tm := range tmc { + for keyID, tlogVerifier := range tm.CTLogs() { + rekorLogs[keyID] = tlogVerifier + } + } + return rekorLogs +} + +type ValidityPeriodChecker interface { + ValidAtTime(time.Time) bool +} + +type TimeConstrainedVerifier interface { + ValidityPeriodChecker + signature.Verifier +} + +type TrustedPublicKeyMaterial struct { + BaseTrustedMaterial + publicKeyVerifier func(string) (TimeConstrainedVerifier, error) +} + +func (tr *TrustedPublicKeyMaterial) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) { + return tr.publicKeyVerifier(keyID) +} + +func NewTrustedPublicKeyMaterial(publicKeyVerifier func(string) (TimeConstrainedVerifier, error)) *TrustedPublicKeyMaterial { + return &TrustedPublicKeyMaterial{ + publicKeyVerifier: publicKeyVerifier, + } +} + +// ExpiringKey is a TimeConstrainedVerifier with a static validity period. 
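+//
+// A short illustrative sketch (verifier is any signature.Verifier the
+// caller already holds; start and end bound the key's validity):
+//
+//	key := NewExpiringKey(verifier, start, end)
+//	if key.ValidAtTime(time.Now()) {
+//		// the key may be used for verification at this time
+//	}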
+type ExpiringKey struct { + signature.Verifier + validityPeriodStart time.Time + validityPeriodEnd time.Time +} + +var _ TimeConstrainedVerifier = &ExpiringKey{} + +// ValidAtTime returns true if the key is valid at the given time. If the +// validity period start time is not set, the key is considered valid for all +// times before the end time. Likewise, if the validity period end time is not +// set, the key is considered valid for all times after the start time. +func (k *ExpiringKey) ValidAtTime(t time.Time) bool { + if !k.validityPeriodStart.IsZero() && t.Before(k.validityPeriodStart) { + return false + } + if !k.validityPeriodEnd.IsZero() && t.After(k.validityPeriodEnd) { + return false + } + return true +} + +// NewExpiringKey returns a new ExpiringKey with the given validity period +func NewExpiringKey(verifier signature.Verifier, validityPeriodStart, validityPeriodEnd time.Time) *ExpiringKey { + return &ExpiringKey{ + Verifier: verifier, + validityPeriodStart: validityPeriodStart, + validityPeriodEnd: validityPeriodEnd, + } +} + +// NewTrustedPublicKeyMaterialFromMapping returns a TrustedPublicKeyMaterial from a map of key IDs to +// ExpiringKeys. +func NewTrustedPublicKeyMaterialFromMapping(trustedPublicKeys map[string]*ExpiringKey) *TrustedPublicKeyMaterial { + return NewTrustedPublicKeyMaterial(func(keyID string) (TimeConstrainedVerifier, error) { + expiringKey, ok := trustedPublicKeys[keyID] + if !ok { + return nil, fmt.Errorf("public key not found for keyID: %s", keyID) + } + return expiringKey, nil + }) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go new file mode 100644 index 00000000000..0bd69d6f148 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go @@ -0,0 +1,556 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package root + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/hex" + "fmt" + "log" + "os" + "sync" + "time" + + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1" + "github.com/sigstore/sigstore-go/pkg/tuf" + "google.golang.org/protobuf/encoding/protojson" +) + +const TrustedRootMediaType01 = "application/vnd.dev.sigstore.trustedroot+json;version=0.1" + +type TrustedRoot struct { + BaseTrustedMaterial + trustedRoot *prototrustroot.TrustedRoot + rekorLogs map[string]*TransparencyLog + certificateAuthorities []CertificateAuthority + ctLogs map[string]*TransparencyLog + timestampingAuthorities []TimestampingAuthority +} + +type TransparencyLog struct { + BaseURL string + ID []byte + ValidityPeriodStart time.Time + ValidityPeriodEnd time.Time + // This is the hash algorithm used by the Merkle tree + HashFunc crypto.Hash + PublicKey crypto.PublicKey + // The hash algorithm used during signature creation + SignatureHashFunc crypto.Hash +} + +const ( + defaultTrustedRoot = "trusted_root.json" +) + +func (tr *TrustedRoot) TimestampingAuthorities() []TimestampingAuthority { + return tr.timestampingAuthorities +} + +func (tr *TrustedRoot) FulcioCertificateAuthorities() []CertificateAuthority { + return tr.certificateAuthorities +} + +func (tr *TrustedRoot) RekorLogs() map[string]*TransparencyLog { + return tr.rekorLogs +} + +func (tr *TrustedRoot) CTLogs() map[string]*TransparencyLog { + return tr.ctLogs +} + +func (tr *TrustedRoot) MarshalJSON() ([]byte, error) { + err := tr.constructProtoTrustRoot() + if err != nil { + return nil, fmt.Errorf("failed constructing protobuf TrustRoot representation: %w", err) + } + + return protojson.Marshal(tr.trustedRoot) +} + +func NewTrustedRootFromProtobuf(protobufTrustedRoot *prototrustroot.TrustedRoot) (trustedRoot *TrustedRoot, err error) { + if protobufTrustedRoot.GetMediaType() != TrustedRootMediaType01 { + return nil, fmt.Errorf("unsupported TrustedRoot media type: %s", protobufTrustedRoot.GetMediaType()) + } + + trustedRoot = &TrustedRoot{trustedRoot: protobufTrustedRoot} + trustedRoot.rekorLogs, err = ParseTransparencyLogs(protobufTrustedRoot.GetTlogs()) + if err != nil { + return nil, err + } + + trustedRoot.certificateAuthorities, err = ParseCertificateAuthorities(protobufTrustedRoot.GetCertificateAuthorities()) + if err != nil { + return nil, err + } + + trustedRoot.timestampingAuthorities, err = ParseTimestampingAuthorities(protobufTrustedRoot.GetTimestampAuthorities()) + if err != nil { + return nil, err + } + + trustedRoot.ctLogs, err = ParseTransparencyLogs(protobufTrustedRoot.GetCtlogs()) + if err != nil { + return nil, err + } + + return trustedRoot, nil +} + +func ParseTransparencyLogs(tlogs []*prototrustroot.TransparencyLogInstance) (transparencyLogs map[string]*TransparencyLog, err error) { + transparencyLogs = make(map[string]*TransparencyLog) + for _, tlog := range tlogs { + if tlog.GetHashAlgorithm() != protocommon.HashAlgorithm_SHA2_256 { + return nil, fmt.Errorf("unsupported tlog hash algorithm: %s", tlog.GetHashAlgorithm()) + } + if tlog.GetLogId() == nil { + return nil, fmt.Errorf("tlog missing log ID") + } + if tlog.GetLogId().GetKeyId() == nil { + return nil, fmt.Errorf("tlog missing log ID key ID") + } + encodedKeyID := hex.EncodeToString(tlog.GetLogId().GetKeyId()) + + if tlog.GetPublicKey() == nil { + return nil, fmt.Errorf("tlog missing public key") + } + if 
tlog.GetPublicKey().GetRawBytes() == nil {
+			return nil, fmt.Errorf("tlog missing public key raw bytes")
+		}
+
+		var hashFunc crypto.Hash
+		switch tlog.GetHashAlgorithm() {
+		case protocommon.HashAlgorithm_SHA2_256:
+			hashFunc = crypto.SHA256
+		default:
+			return nil, fmt.Errorf("unsupported hash function for the tlog")
+		}
+
+		tlogEntry := &TransparencyLog{
+			BaseURL:           tlog.GetBaseUrl(),
+			ID:                tlog.GetLogId().GetKeyId(),
+			HashFunc:          hashFunc,
+			SignatureHashFunc: crypto.SHA256,
+		}
+
+		switch tlog.GetPublicKey().GetKeyDetails() {
+		case protocommon.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256,
+			protocommon.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384,
+			protocommon.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512:
+			key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes())
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse public key for tlog: %s %w",
+					tlog.GetBaseUrl(),
+					err,
+				)
+			}
+			var ecKey *ecdsa.PublicKey
+			var ok bool
+			if ecKey, ok = key.(*ecdsa.PublicKey); !ok {
+				return nil, fmt.Errorf("tlog public key is not ECDSA: %s", tlog.GetPublicKey().GetKeyDetails())
+			}
+			tlogEntry.PublicKey = ecKey
+		// This key format has the public key in PKIX RSA format and a PKCS#1 v1.5 or RSASSA-PSS signature
+		case protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256,
+			protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256,
+			protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256:
+			key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes())
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse public key for tlog: %s %w",
+					tlog.GetBaseUrl(),
+					err,
+				)
+			}
+			var rsaKey *rsa.PublicKey
+			var ok bool
+			if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+				return nil, fmt.Errorf("tlog public key is not RSA: %s", tlog.GetPublicKey().GetKeyDetails())
+			}
+			tlogEntry.PublicKey = rsaKey
+		case protocommon.PublicKeyDetails_PKIX_ED25519:
+			key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes())
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse public key for tlog: %s %w",
+					tlog.GetBaseUrl(),
+					err,
+				)
+			}
+			var edKey ed25519.PublicKey
+			var ok bool
+			if edKey, ok = key.(ed25519.PublicKey); !ok {
+				return nil, fmt.Errorf("tlog public key is not Ed25519: %s", tlog.GetPublicKey().GetKeyDetails())
+			}
+			tlogEntry.PublicKey = edKey
+		// This key format is deprecated, but currently in use for the Sigstore staging instance
+		case protocommon.PublicKeyDetails_PKCS1_RSA_PKCS1V5: //nolint:staticcheck
+			key, err := x509.ParsePKCS1PublicKey(tlog.GetPublicKey().GetRawBytes())
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse public key for tlog: %s %w",
+					tlog.GetBaseUrl(),
+					err,
+				)
+			}
+			tlogEntry.PublicKey = key
+		default:
+			return nil, fmt.Errorf("unsupported tlog public key type: %s", tlog.GetPublicKey().GetKeyDetails())
+		}
+
+		tlogEntry.SignatureHashFunc = getSignatureHashAlgo(tlogEntry.PublicKey)
+		transparencyLogs[encodedKeyID] = tlogEntry
+
+		if validFor := tlog.GetPublicKey().GetValidFor(); validFor != nil {
+			if validFor.GetStart() != nil {
+				transparencyLogs[encodedKeyID].ValidityPeriodStart = validFor.GetStart().AsTime()
+			} else {
+				return nil, fmt.Errorf("tlog missing public key validity period start time")
+			}
+			if validFor.GetEnd() != nil {
+				transparencyLogs[encodedKeyID].ValidityPeriodEnd = validFor.GetEnd().AsTime()
+			}
+		} else {
+			return nil, fmt.Errorf("tlog missing public key validity period")
+		}
+	}
+	return transparencyLogs, nil
+}
+
+func ParseCertificateAuthorities(certAuthorities []*prototrustroot.CertificateAuthority)
(certificateAuthorities []CertificateAuthority, err error) { + certificateAuthorities = make([]CertificateAuthority, len(certAuthorities)) + for i, certAuthority := range certAuthorities { + certificateAuthority, err := ParseCertificateAuthority(certAuthority) + if err != nil { + return nil, err + } + certificateAuthorities[i] = certificateAuthority + } + return certificateAuthorities, nil +} + +func ParseCertificateAuthority(certAuthority *prototrustroot.CertificateAuthority) (*FulcioCertificateAuthority, error) { + if certAuthority == nil { + return nil, fmt.Errorf("CertificateAuthority is nil") + } + certChain := certAuthority.GetCertChain() + if certChain == nil { + return nil, fmt.Errorf("CertificateAuthority missing cert chain") + } + chainLen := len(certChain.GetCertificates()) + if chainLen < 1 { + return nil, fmt.Errorf("CertificateAuthority cert chain is empty") + } + + certificateAuthority := &FulcioCertificateAuthority{ + URI: certAuthority.Uri, + } + for i, cert := range certChain.GetCertificates() { + parsedCert, err := x509.ParseCertificate(cert.RawBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate for %s %w", + certAuthority.Uri, + err, + ) + } + if i < chainLen-1 { + certificateAuthority.Intermediates = append(certificateAuthority.Intermediates, parsedCert) + } else { + certificateAuthority.Root = parsedCert + } + } + validFor := certAuthority.GetValidFor() + if validFor != nil { + start := validFor.GetStart() + if start != nil { + certificateAuthority.ValidityPeriodStart = start.AsTime() + } + end := validFor.GetEnd() + if end != nil { + certificateAuthority.ValidityPeriodEnd = end.AsTime() + } + } + + certificateAuthority.URI = certAuthority.Uri + + return certificateAuthority, nil +} + +func ParseTimestampingAuthorities(certAuthorities []*prototrustroot.CertificateAuthority) (timestampingAuthorities []TimestampingAuthority, err error) { + timestampingAuthorities = make([]TimestampingAuthority, len(certAuthorities)) + for i, certAuthority := range certAuthorities { + timestampingAuthority, err := ParseTimestampingAuthority(certAuthority) + if err != nil { + return nil, err + } + timestampingAuthorities[i] = timestampingAuthority + } + return timestampingAuthorities, nil +} + +func ParseTimestampingAuthority(certAuthority *prototrustroot.CertificateAuthority) (TimestampingAuthority, error) { + if certAuthority == nil { + return nil, fmt.Errorf("CertificateAuthority is nil") + } + certChain := certAuthority.GetCertChain() + if certChain == nil { + return nil, fmt.Errorf("CertificateAuthority missing cert chain") + } + chainLen := len(certChain.GetCertificates()) + if chainLen < 1 { + return nil, fmt.Errorf("CertificateAuthority cert chain is empty") + } + + timestampingAuthority := &SigstoreTimestampingAuthority{ + URI: certAuthority.Uri, + } + for i, cert := range certChain.GetCertificates() { + parsedCert, err := x509.ParseCertificate(cert.RawBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate for %s %w", + certAuthority.Uri, + err, + ) + } + switch { + case i == 0 && !parsedCert.IsCA: + timestampingAuthority.Leaf = parsedCert + case i < chainLen-1: + timestampingAuthority.Intermediates = append(timestampingAuthority.Intermediates, parsedCert) + case i == chainLen-1: + timestampingAuthority.Root = parsedCert + } + } + validFor := certAuthority.GetValidFor() + if validFor != nil { + start := validFor.GetStart() + if start != nil { + timestampingAuthority.ValidityPeriodStart = start.AsTime() + } + end := 
validFor.GetEnd() + if end != nil { + timestampingAuthority.ValidityPeriodEnd = end.AsTime() + } + } + + timestampingAuthority.URI = certAuthority.Uri + + return timestampingAuthority, nil +} + +func NewTrustedRootFromPath(path string) (*TrustedRoot, error) { + trustedrootJSON, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read trusted root %w", + err, + ) + } + + return NewTrustedRootFromJSON(trustedrootJSON) +} + +// NewTrustedRootFromJSON returns the Sigstore trusted root. +func NewTrustedRootFromJSON(rootJSON []byte) (*TrustedRoot, error) { + pbTrustedRoot, err := NewTrustedRootProtobuf(rootJSON) + if err != nil { + return nil, err + } + + return NewTrustedRootFromProtobuf(pbTrustedRoot) +} + +// NewTrustedRootProtobuf returns the Sigstore trusted root as a protobuf. +func NewTrustedRootProtobuf(rootJSON []byte) (*prototrustroot.TrustedRoot, error) { + pbTrustedRoot := &prototrustroot.TrustedRoot{} + err := protojson.Unmarshal(rootJSON, pbTrustedRoot) + if err != nil { + return nil, fmt.Errorf("failed to proto-json unmarshal trusted root: %w", err) + } + return pbTrustedRoot, nil +} + +// NewTrustedRoot initializes a TrustedRoot object from a mediaType string, list of Fulcio +// certificate authorities, list of timestamp authorities and maps of ctlogs and rekor +// transparency log instances. +// mediaType must be TrustedRootMediaType01 ("application/vnd.dev.sigstore.trustedroot+json;version=0.1"). +func NewTrustedRoot(mediaType string, + certificateAuthorities []CertificateAuthority, + certificateTransparencyLogs map[string]*TransparencyLog, + timestampAuthorities []TimestampingAuthority, + transparencyLogs map[string]*TransparencyLog) (*TrustedRoot, error) { + // document that we assume 1 cert chain per target and with certs already ordered from leaf to root + if mediaType != TrustedRootMediaType01 { + return nil, fmt.Errorf("unsupported TrustedRoot media type: %s, must be %s", mediaType, TrustedRootMediaType01) + } + tr := &TrustedRoot{ + certificateAuthorities: certificateAuthorities, + ctLogs: certificateTransparencyLogs, + timestampingAuthorities: timestampAuthorities, + rekorLogs: transparencyLogs, + } + return tr, nil +} + +// FetchTrustedRoot fetches the Sigstore trusted root from TUF and returns it. +func FetchTrustedRoot() (*TrustedRoot, error) { + return FetchTrustedRootWithOptions(tuf.DefaultOptions()) +} + +// FetchTrustedRootWithOptions fetches the trusted root from TUF with the given options and returns it. 
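+//
+// Illustrative use (assumes network access to the configured TUF mirror):
+//
+//	tr, err := FetchTrustedRootWithOptions(tuf.DefaultOptions())
+//	if err != nil {
+//		return err
+//	}
+//	_ = tr.RekorLogs()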
+func FetchTrustedRootWithOptions(opts *tuf.Options) (*TrustedRoot, error) {
+	client, err := tuf.New(opts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create TUF client: %w", err)
+	}
+	return GetTrustedRoot(client)
+}
+
+// GetTrustedRoot returns the trusted root fetched via the given TUF client.
+func GetTrustedRoot(c *tuf.Client) (*TrustedRoot, error) {
+	jsonBytes, err := c.GetTarget(defaultTrustedRoot)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get trusted root from TUF client: %w",
+			err,
+		)
+	}
+	return NewTrustedRootFromJSON(jsonBytes)
+}
+
+func getSignatureHashAlgo(pubKey crypto.PublicKey) crypto.Hash {
+	var h crypto.Hash
+	switch pk := pubKey.(type) {
+	case *rsa.PublicKey:
+		h = crypto.SHA256
+	case *ecdsa.PublicKey:
+		switch pk.Curve {
+		case elliptic.P256():
+			h = crypto.SHA256
+		case elliptic.P384():
+			h = crypto.SHA384
+		case elliptic.P521():
+			h = crypto.SHA512
+		default:
+			h = crypto.SHA256
+		}
+	case ed25519.PublicKey:
+		h = crypto.SHA512
+	default:
+		h = crypto.SHA256
+	}
+	return h
+}
+
+// LiveTrustedRoot is a wrapper around TrustedRoot that periodically
+// refreshes the trusted root from TUF. This is needed for long-running
+// processes to ensure that the trusted root does not expire.
+type LiveTrustedRoot struct {
+	*TrustedRoot
+	mu sync.RWMutex
+}
+
+// NewLiveTrustedRoot returns a LiveTrustedRoot that will periodically
+// refresh the trusted root from TUF.
+func NewLiveTrustedRoot(opts *tuf.Options) (*LiveTrustedRoot, error) {
+	return NewLiveTrustedRootFromTarget(opts, defaultTrustedRoot)
+}
+
+// NewLiveTrustedRootFromTarget returns a LiveTrustedRoot that will
+// periodically refresh the trusted root from TUF using the provided target.
+func NewLiveTrustedRootFromTarget(opts *tuf.Options, target string) (*LiveTrustedRoot, error) {
+	return NewLiveTrustedRootFromTargetWithPeriod(opts, target, 24*time.Hour)
+}
+
+// NewLiveTrustedRootFromTargetWithPeriod returns a LiveTrustedRoot that
+// performs a TUF refresh with the provided period, accessing the provided
+// target.
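+//
+// For example, refreshing every 12 hours against the default target
+// (illustrative):
+//
+//	ltr, err := NewLiveTrustedRootFromTargetWithPeriod(tuf.DefaultOptions(), "trusted_root.json", 12*time.Hour)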
+func NewLiveTrustedRootFromTargetWithPeriod(opts *tuf.Options, target string, rfPeriod time.Duration) (*LiveTrustedRoot, error) {
+	client, err := tuf.New(opts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create TUF client: %w", err)
+	}
+
+	b, err := client.GetTarget(target)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get target from TUF client: %w", err)
+	}
+
+	tr, err := NewTrustedRootFromJSON(b)
+	if err != nil {
+		return nil, err
+	}
+	ltr := &LiveTrustedRoot{
+		TrustedRoot: tr,
+		mu:          sync.RWMutex{},
+	}
+
+	ticker := time.NewTicker(rfPeriod)
+	go func() {
+		for range ticker.C {
+			client, err = tuf.New(opts)
+			if err != nil {
+				log.Printf("error creating TUF client: %v", err)
+				// skip this refresh attempt; retry on the next tick
+				continue
+			}
+
+			b, err := client.GetTarget(target)
+			if err != nil {
+				log.Printf("error fetching trusted root: %v", err)
+				continue
+			}
+
+			newTr, err := NewTrustedRootFromJSON(b)
+			if err != nil {
+				log.Printf("error parsing trusted root: %v", err)
+				continue
+			}
+			ltr.mu.Lock()
+			ltr.TrustedRoot = newTr
+			ltr.mu.Unlock()
+			log.Printf("successfully refreshed the TUF root")
+		}
+	}()
+	return ltr, nil
+}
+
+func (l *LiveTrustedRoot) TimestampingAuthorities() []TimestampingAuthority {
+	l.mu.RLock()
+	defer l.mu.RUnlock()
+	return l.TrustedRoot.TimestampingAuthorities()
+}
+
+func (l *LiveTrustedRoot) FulcioCertificateAuthorities() []CertificateAuthority {
+	l.mu.RLock()
+	defer l.mu.RUnlock()
+	return l.TrustedRoot.FulcioCertificateAuthorities()
+}
+
+func (l *LiveTrustedRoot) RekorLogs() map[string]*TransparencyLog {
+	l.mu.RLock()
+	defer l.mu.RUnlock()
+	return l.TrustedRoot.RekorLogs()
+}
+
+func (l *LiveTrustedRoot) CTLogs() map[string]*TransparencyLog {
+	l.mu.RLock()
+	defer l.mu.RUnlock()
+	return l.TrustedRoot.CTLogs()
+}
+
+func (l *LiveTrustedRoot) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) {
+	l.mu.RLock()
+	defer l.mu.RUnlock()
+	return l.TrustedRoot.PublicKeyVerifier(keyID)
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go
new file mode 100644
index 00000000000..78ec96e3a8c
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go
@@ -0,0 +1,270 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package root
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/elliptic"
+	"crypto/rsa"
+	"crypto/x509"
+	"fmt"
+	"sort"
+	"time"
+
+	protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+	prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+func (tr *TrustedRoot) constructProtoTrustRoot() error {
+	tr.trustedRoot = &prototrustroot.TrustedRoot{}
+	tr.trustedRoot.MediaType = TrustedRootMediaType01
+
+	for logID, transparencyLog := range tr.rekorLogs {
+		tlProto, err := transparencyLogToProtobufTL(transparencyLog)
+		if err != nil {
+			return fmt.Errorf("failed converting rekor log %s to protobuf: %w", logID, err)
+		}
+		tr.trustedRoot.Tlogs = append(tr.trustedRoot.Tlogs, tlProto)
+	}
+	// ensure stable sorting of the slice
+	sortTlogSlice(tr.trustedRoot.Tlogs)
+
+	for logID, ctLog := range tr.ctLogs {
+		ctProto, err := transparencyLogToProtobufTL(ctLog)
+		if err != nil {
+			return fmt.Errorf("failed converting ctlog %s to protobuf: %w", logID, err)
+		}
+		tr.trustedRoot.Ctlogs = append(tr.trustedRoot.Ctlogs, ctProto)
+	}
+	// ensure stable sorting of the slice
+	sortTlogSlice(tr.trustedRoot.Ctlogs)
+
+	for _, ca := range tr.certificateAuthorities {
+		caProto, err := certificateAuthorityToProtobufCA(ca.(*FulcioCertificateAuthority))
+		if err != nil {
+			return fmt.Errorf("failed converting fulcio cert chain to protobuf: %w", err)
+		}
+		tr.trustedRoot.CertificateAuthorities = append(tr.trustedRoot.CertificateAuthorities, caProto)
+	}
+	// ensure stable sorting of the slice
+	sortCASlice(tr.trustedRoot.CertificateAuthorities)
+
+	for _, ca := range tr.timestampingAuthorities {
+		caProto, err := timestampingAuthorityToProtobufCA(ca.(*SigstoreTimestampingAuthority))
+		if err != nil {
+			return fmt.Errorf("failed converting TSA cert chain to protobuf: %w", err)
+		}
+		tr.trustedRoot.TimestampAuthorities = append(tr.trustedRoot.TimestampAuthorities, caProto)
+	}
+	// ensure stable sorting of the slice
+	sortCASlice(tr.trustedRoot.TimestampAuthorities)
+
+	return nil
+}
+
+func sortCASlice(slc []*prototrustroot.CertificateAuthority) {
+	sort.Slice(slc, func(i, j int) bool {
+		iTime := time.Unix(0, 0)
+		jTime := time.Unix(0, 0)
+
+		if slc[i].ValidFor.Start != nil {
+			iTime = slc[i].ValidFor.Start.AsTime()
+		}
+		if slc[j].ValidFor.Start != nil {
+			jTime = slc[j].ValidFor.Start.AsTime()
+		}
+
+		return iTime.Before(jTime)
+	})
+}
+
+func sortTlogSlice(slc []*prototrustroot.TransparencyLogInstance) {
+	sort.Slice(slc, func(i, j int) bool {
+		iTime := time.Unix(0, 0)
+		jTime := time.Unix(0, 0)
+
+		if slc[i].PublicKey.ValidFor.Start != nil {
+			iTime = slc[i].PublicKey.ValidFor.Start.AsTime()
+		}
+		if slc[j].PublicKey.ValidFor.Start != nil {
+			jTime = slc[j].PublicKey.ValidFor.Start.AsTime()
+		}
+
+		return iTime.Before(jTime)
+	})
+}
+
+func certificateAuthorityToProtobufCA(ca *FulcioCertificateAuthority) (*prototrustroot.CertificateAuthority, error) {
+	// Check the root before it is dereferenced below.
+	if ca.Root == nil {
+		return nil, fmt.Errorf("root certificate is nil")
+	}
+	org := ""
+	if len(ca.Root.Subject.Organization) > 0 {
+		org = ca.Root.Subject.Organization[0]
+	}
+	var allCerts []*protocommon.X509Certificate
+	for _, intermed := range ca.Intermediates {
+		allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: intermed.Raw})
+	}
+	allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Root.Raw})
+
+	caProto := prototrustroot.CertificateAuthority{
+		Uri: ca.URI,
+		Subject: &protocommon.DistinguishedName{
+			Organization: org,
+			CommonName:   ca.Root.Subject.CommonName,
+		},
+		ValidFor: &protocommon.TimeRange{
+			Start: timestamppb.New(ca.ValidityPeriodStart),
+		},
+		CertChain: &protocommon.X509CertificateChain{
+			Certificates: allCerts,
+		},
+	}
+
+	if !ca.ValidityPeriodEnd.IsZero() {
+		caProto.ValidFor.End = timestamppb.New(ca.ValidityPeriodEnd)
+	}
+
+	return &caProto, nil
+}
+
+func timestampingAuthorityToProtobufCA(ca *SigstoreTimestampingAuthority) (*prototrustroot.CertificateAuthority, error) {
+	// Check the root before it is dereferenced below.
+	if ca.Root == nil {
+		return nil, fmt.Errorf("root certificate is nil")
+	}
+	org := ""
+	if len(ca.Root.Subject.Organization) > 0 {
+		org = ca.Root.Subject.Organization[0]
+	}
+	var allCerts []*protocommon.X509Certificate
+	if ca.Leaf != nil {
+		allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Leaf.Raw})
+	}
+	for _, intermed := range ca.Intermediates {
+		allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: intermed.Raw})
+	}
+	allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Root.Raw})
+
+	caProto := prototrustroot.CertificateAuthority{
+		Uri: ca.URI,
+		Subject: &protocommon.DistinguishedName{
+			Organization: org,
+			CommonName:   ca.Root.Subject.CommonName,
+		},
+		ValidFor: &protocommon.TimeRange{
+			Start: timestamppb.New(ca.ValidityPeriodStart),
+		},
+		CertChain: &protocommon.X509CertificateChain{
+			Certificates: allCerts,
+		},
+	}
+
+	if !ca.ValidityPeriodEnd.IsZero() {
+		caProto.ValidFor.End = timestamppb.New(ca.ValidityPeriodEnd)
+	}
+
+	return &caProto, nil
+}
+
+func transparencyLogToProtobufTL(tl *TransparencyLog) (*prototrustroot.TransparencyLogInstance, error) {
+	hashAlgo, err := hashAlgorithmToProtobufHashAlgorithm(tl.HashFunc)
+	if err != nil {
+		return nil, fmt.Errorf("failed converting hash algorithm to protobuf: %w", err)
+	}
+	publicKey, err := publicKeyToProtobufPublicKey(tl.PublicKey, tl.ValidityPeriodStart, tl.ValidityPeriodEnd)
+	if err != nil {
+		return nil, fmt.Errorf("failed converting public key to protobuf: %w", err)
+	}
+	trProto := prototrustroot.TransparencyLogInstance{
+		BaseUrl:       tl.BaseURL,
+		HashAlgorithm: hashAlgo,
+		PublicKey:     publicKey,
+		LogId: &protocommon.LogId{
+			KeyId: tl.ID,
+		},
+	}
+
+	return &trProto, nil
+}
+
+func hashAlgorithmToProtobufHashAlgorithm(hashAlgorithm crypto.Hash) (protocommon.HashAlgorithm, error) {
+	switch hashAlgorithm {
+	case crypto.SHA256:
+		return protocommon.HashAlgorithm_SHA2_256, nil
+	case crypto.SHA384:
+		return protocommon.HashAlgorithm_SHA2_384, nil
+	case crypto.SHA512:
+		return protocommon.HashAlgorithm_SHA2_512, nil
+	case crypto.SHA3_256:
+		return protocommon.HashAlgorithm_SHA3_256, nil
+	case crypto.SHA3_384:
+		return protocommon.HashAlgorithm_SHA3_384, nil
+	default:
+		return 0, fmt.Errorf("unsupported hash algorithm for Merkle tree: %v", hashAlgorithm)
+	}
+}
+
+func publicKeyToProtobufPublicKey(publicKey crypto.PublicKey, start time.Time, end time.Time) (*protocommon.PublicKey, error) {
+	pkd := protocommon.PublicKey{
+		ValidFor: &protocommon.TimeRange{
+			Start: timestamppb.New(start),
+		},
+	}
+
+	if !end.IsZero() {
+		pkd.ValidFor.End = timestamppb.New(end)
+	}
+
+	rawBytes, err := x509.MarshalPKIXPublicKey(publicKey)
+	if err != nil {
+		return nil, fmt.Errorf("failed marshalling public key: %w", err)
+	}
+	pkd.RawBytes = rawBytes
+
+	switch p := publicKey.(type) {
+	case *ecdsa.PublicKey:
+		switch p.Curve {
+		case elliptic.P256():
+			pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256
+		case elliptic.P384():
pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384 + case elliptic.P521(): + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512 + default: + return nil, fmt.Errorf("unsupported curve for ecdsa key: %T", p.Curve) + } + case *rsa.PublicKey: + switch p.Size() * 8 { + case 2048: + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256 + case 3072: + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256 + case 4096: + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256 + default: + return nil, fmt.Errorf("unsupported public modulus for RSA key: %d", p.Size()) + } + case ed25519.PublicKey: + pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ED25519 + default: + return nil, fmt.Errorf("unknown public key type: %T", p) + } + + return &pkd, nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go b/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go new file mode 100644 index 00000000000..ff8bec4469a --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go @@ -0,0 +1,529 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlog + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "time" + + "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag/conv" + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + rekortilespb "github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf" + "github.com/sigstore/rekor-tiles/v2/pkg/note" + typesverifier "github.com/sigstore/rekor-tiles/v2/pkg/types/verifier" + "github.com/sigstore/rekor-tiles/v2/pkg/verify" + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/types" + dsse_v001 "github.com/sigstore/rekor/pkg/types/dsse/v0.0.1" + hashedrekord_v001 "github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1" + intoto_v002 "github.com/sigstore/rekor/pkg/types/intoto/v0.0.2" + rekorVerify "github.com/sigstore/rekor/pkg/verify" + "github.com/sigstore/sigstore/pkg/signature" + "google.golang.org/protobuf/encoding/protojson" + + "github.com/sigstore/sigstore-go/pkg/root" +) + +type Entry struct { + kind string + version string + rekorV1Entry types.EntryImpl + rekorV2Entry *rekortilespb.Entry + signedEntryTimestamp []byte + tle *v1.TransparencyLogEntry +} + +type RekorPayload struct { + Body interface{} `json:"body"` + IntegratedTime int64 `json:"integratedTime"` + LogIndex int64 `json:"logIndex"` + LogID string `json:"logID"` //nolint:tagliatelle +} + +var ErrNilValue = errors.New("validation error: nil value in transaction log entry") +var ErrInvalidRekorV2Entry = errors.New("type error: object is not a Rekor v2 
type, try parsing as Rekor v1") + +// Deprecated: use NewTlogEntry. NewEntry only parses a Rekor v1 entry. +func NewEntry(body []byte, integratedTime int64, logIndex int64, logID []byte, signedEntryTimestamp []byte, inclusionProof *models.InclusionProof) (*Entry, error) { + pe, err := models.UnmarshalProposedEntry(bytes.NewReader(body), runtime.JSONConsumer()) + if err != nil { + return nil, err + } + rekorEntry, err := types.UnmarshalEntry(pe) + if err != nil { + return nil, err + } + + entry := &Entry{ + rekorV1Entry: rekorEntry, + tle: &v1.TransparencyLogEntry{ + LogIndex: logIndex, + LogId: &protocommon.LogId{ + KeyId: logID, + }, + IntegratedTime: integratedTime, + CanonicalizedBody: body, + }, + kind: pe.Kind(), + version: rekorEntry.APIVersion(), + } + + if len(signedEntryTimestamp) > 0 { + entry.signedEntryTimestamp = signedEntryTimestamp + } + + if inclusionProof != nil { + hashes := make([][]byte, len(inclusionProof.Hashes)) + for i, s := range inclusionProof.Hashes { + hashes[i], err = hex.DecodeString(s) + if err != nil { + return nil, err + } + } + rootHashDec, err := hex.DecodeString(*inclusionProof.RootHash) + if err != nil { + return nil, err + } + entry.tle.InclusionProof = &v1.InclusionProof{ + LogIndex: logIndex, + RootHash: rootHashDec, + TreeSize: *inclusionProof.TreeSize, + Hashes: hashes, + Checkpoint: &v1.Checkpoint{ + Envelope: *inclusionProof.Checkpoint, + }, + } + } + + return entry, nil +} + +func NewTlogEntry(tle *v1.TransparencyLogEntry) (*Entry, error) { + var rekorV2Entry *rekortilespb.Entry + var rekorV1Entry types.EntryImpl + var err error + + body := tle.CanonicalizedBody + rekorV2Entry, err = unmarshalRekorV2Entry(body) + if err != nil { + rekorV1Entry, err = unmarshalRekorV1Entry(body) + if err != nil { + return nil, fmt.Errorf("entry body is not a recognizable Rekor v1 or Rekor v2 type: %w", err) + } + } + + entry := &Entry{ + rekorV1Entry: rekorV1Entry, + rekorV2Entry: rekorV2Entry, + kind: tle.KindVersion.Kind, + version: tle.KindVersion.Version, + } + + signedEntryTimestamp := []byte{} + if tle.InclusionPromise != nil && tle.InclusionPromise.SignedEntryTimestamp != nil { + signedEntryTimestamp = tle.InclusionPromise.SignedEntryTimestamp + } + if len(signedEntryTimestamp) > 0 { + entry.signedEntryTimestamp = signedEntryTimestamp + } + entry.tle = tle + + return entry, nil +} + +func ParseTransparencyLogEntry(tle *v1.TransparencyLogEntry) (*Entry, error) { + if tle == nil { + return nil, ErrNilValue + } + if tle.CanonicalizedBody == nil || + tle.LogIndex < 0 || + tle.LogId == nil || + tle.LogId.KeyId == nil || + tle.KindVersion == nil { + return nil, ErrNilValue + } + + if tle.InclusionProof != nil { + if tle.InclusionProof.Checkpoint == nil { + return nil, fmt.Errorf("inclusion proof missing required checkpoint") + } + if tle.InclusionProof.Checkpoint.Envelope == "" { + return nil, fmt.Errorf("inclusion proof checkpoint empty") + } + } + + entry, err := NewTlogEntry(tle) + if err != nil { + return nil, err + } + if entry.kind != tle.KindVersion.Kind || entry.version != tle.KindVersion.Version { + return nil, fmt.Errorf("kind and version mismatch: %s/%s != %s/%s", entry.kind, entry.version, tle.KindVersion.Kind, tle.KindVersion.Version) + } + return entry, nil +} + +// Deprecated: use ParseTransparencyLogEntry. ParseEntry only parses Rekor v1 type entries. +// ParseEntry decodes the entry bytes to a specific entry type (types.EntryImpl). 
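+//
+// New callers should prefer ParseTransparencyLogEntry, which accepts the
+// same protobuf entry (sketch):
+//
+//	entry, err := ParseTransparencyLogEntry(protoEntry)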
+func ParseEntry(protoEntry *v1.TransparencyLogEntry) (entry *Entry, err error) { + if protoEntry == nil || + protoEntry.CanonicalizedBody == nil || + protoEntry.IntegratedTime == 0 || + protoEntry.LogIndex < 0 || + protoEntry.LogId == nil || + protoEntry.LogId.KeyId == nil || + protoEntry.KindVersion == nil { + return nil, ErrNilValue + } + + signedEntryTimestamp := []byte{} + if protoEntry.InclusionPromise != nil && protoEntry.InclusionPromise.SignedEntryTimestamp != nil { + signedEntryTimestamp = protoEntry.InclusionPromise.SignedEntryTimestamp + } + + var inclusionProof *models.InclusionProof + + if protoEntry.InclusionProof != nil { + var hashes []string + + for _, v := range protoEntry.InclusionProof.Hashes { + hashes = append(hashes, hex.EncodeToString(v)) + } + + rootHash := hex.EncodeToString(protoEntry.InclusionProof.RootHash) + + if protoEntry.InclusionProof.Checkpoint == nil { + return nil, fmt.Errorf("inclusion proof missing required checkpoint") + } + if protoEntry.InclusionProof.Checkpoint.Envelope == "" { + return nil, fmt.Errorf("inclusion proof checkpoint empty") + } + + inclusionProof = &models.InclusionProof{ + LogIndex: conv.Pointer(protoEntry.InclusionProof.LogIndex), + RootHash: &rootHash, + TreeSize: conv.Pointer(protoEntry.InclusionProof.TreeSize), + Hashes: hashes, + Checkpoint: conv.Pointer(protoEntry.InclusionProof.Checkpoint.Envelope), + } + } + + entry, err = NewEntry(protoEntry.CanonicalizedBody, protoEntry.IntegratedTime, protoEntry.LogIndex, protoEntry.LogId.KeyId, signedEntryTimestamp, inclusionProof) + if err != nil { + return nil, err + } + + if entry.kind != protoEntry.KindVersion.Kind || entry.version != protoEntry.KindVersion.Version { + return nil, fmt.Errorf("kind and version mismatch: %s/%s != %s/%s", entry.kind, entry.version, protoEntry.KindVersion.Kind, protoEntry.KindVersion.Version) + } + entry.tle = protoEntry + + return entry, nil +} + +func ValidateEntry(entry *Entry) error { + if entry.rekorV1Entry != nil { + switch e := entry.rekorV1Entry.(type) { + case *dsse_v001.V001Entry: + err := e.DSSEObj.Validate(strfmt.Default) + if err != nil { + return err + } + case *hashedrekord_v001.V001Entry: + err := e.HashedRekordObj.Validate(strfmt.Default) + if err != nil { + return err + } + case *intoto_v002.V002Entry: + err := e.IntotoObj.Validate(strfmt.Default) + if err != nil { + return err + } + default: + return fmt.Errorf("unsupported entry type: %T", e) + } + } + if entry.rekorV2Entry != nil { + switch e := entry.rekorV2Entry.GetSpec().GetSpec().(type) { + case *rekortilespb.Spec_HashedRekordV002: + err := validateHashedRekordV002Entry(e.HashedRekordV002) + if err != nil { + return err + } + case *rekortilespb.Spec_DsseV002: + err := validateDSSEV002Entry(e.DsseV002) + if err != nil { + return err + } + } + } + return nil +} + +func validateHashedRekordV002Entry(hr *rekortilespb.HashedRekordLogEntryV002) error { + if hr.GetSignature() == nil || len(hr.GetSignature().GetContent()) == 0 { + return fmt.Errorf("missing signature") + } + if hr.GetSignature().GetVerifier() == nil { + return fmt.Errorf("missing verifier") + } + if hr.GetData() == nil { + return fmt.Errorf("missing digest") + } + return typesverifier.Validate(hr.GetSignature().GetVerifier()) +} + +func validateDSSEV002Entry(d *rekortilespb.DSSELogEntryV002) error { + if d.GetPayloadHash() == nil { + return fmt.Errorf("missing payload") + } + if len(d.GetSignatures()) == 0 { + return fmt.Errorf("missing signatures") + } + return typesverifier.Validate(d.GetSignatures()[0].GetVerifier()) 
+} + +func (entry *Entry) IntegratedTime() time.Time { + if entry.tle.IntegratedTime == 0 { + return time.Time{} + } + return time.Unix(entry.tle.IntegratedTime, 0) +} + +func (entry *Entry) Signature() []byte { + if entry.rekorV1Entry != nil { + switch e := entry.rekorV1Entry.(type) { + case *dsse_v001.V001Entry: + sigBytes, err := base64.StdEncoding.DecodeString(*e.DSSEObj.Signatures[0].Signature) + if err != nil { + return []byte{} + } + return sigBytes + case *hashedrekord_v001.V001Entry: + return e.HashedRekordObj.Signature.Content + case *intoto_v002.V002Entry: + sigBytes, err := base64.StdEncoding.DecodeString(string(*e.IntotoObj.Content.Envelope.Signatures[0].Sig)) + if err != nil { + return []byte{} + } + return sigBytes + } + } + if entry.rekorV2Entry != nil { + switch e := entry.rekorV2Entry.GetSpec().GetSpec().(type) { + case *rekortilespb.Spec_HashedRekordV002: + return e.HashedRekordV002.GetSignature().GetContent() + case *rekortilespb.Spec_DsseV002: + return e.DsseV002.GetSignatures()[0].GetContent() + } + } + + return []byte{} +} + +func (entry *Entry) PublicKey() any { + var pk any + var certBytes []byte + + if entry.rekorV1Entry != nil { + var pemString []byte + switch e := entry.rekorV1Entry.(type) { + case *dsse_v001.V001Entry: + pemString = []byte(*e.DSSEObj.Signatures[0].Verifier) + case *hashedrekord_v001.V001Entry: + pemString = []byte(e.HashedRekordObj.Signature.PublicKey.Content) + case *intoto_v002.V002Entry: + pemString = []byte(*e.IntotoObj.Content.Envelope.Signatures[0].PublicKey) + } + certBlock, _ := pem.Decode(pemString) + certBytes = certBlock.Bytes + } else if entry.rekorV2Entry != nil { + var verifier *rekortilespb.Verifier + switch e := entry.rekorV2Entry.GetSpec().GetSpec().(type) { + case *rekortilespb.Spec_HashedRekordV002: + verifier = e.HashedRekordV002.GetSignature().GetVerifier() + case *rekortilespb.Spec_DsseV002: + verifier = e.DsseV002.GetSignatures()[0].GetVerifier() + } + switch verifier.Verifier.(type) { + case *rekortilespb.Verifier_PublicKey: + certBytes = verifier.GetPublicKey().GetRawBytes() + case *rekortilespb.Verifier_X509Certificate: + certBytes = verifier.GetX509Certificate().GetRawBytes() + } + } + + var err error + + pk, err = x509.ParseCertificate(certBytes) + if err != nil { + pk, err = x509.ParsePKIXPublicKey(certBytes) + if err != nil { + return nil + } + } + + return pk +} + +func (entry *Entry) LogKeyID() string { + return string(entry.tle.GetLogId().GetKeyId()) +} + +func (entry *Entry) LogIndex() int64 { + return entry.tle.GetLogIndex() +} + +func (entry *Entry) Body() any { + return base64.StdEncoding.EncodeToString(entry.tle.CanonicalizedBody) +} + +func (entry *Entry) HasInclusionPromise() bool { + return entry.signedEntryTimestamp != nil +} + +func (entry *Entry) HasInclusionProof() bool { + return entry.tle.InclusionProof != nil +} + +func (entry *Entry) TransparencyLogEntry() *v1.TransparencyLogEntry { + return entry.tle +} + +// VerifyInclusion verifies a Rekor v1-style checkpoint and the entry's inclusion in the Rekor v1 log. 
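+//
+// Illustrative call (verifier must wrap the Rekor log's public key):
+//
+//	if err := VerifyInclusion(entry, verifier); err != nil {
+//		return err
+//	}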
+func VerifyInclusion(entry *Entry, verifier signature.Verifier) error { + hashes := make([]string, len(entry.tle.InclusionProof.Hashes)) + for i, b := range entry.tle.InclusionProof.Hashes { + hashes[i] = hex.EncodeToString(b) + } + rootHash := hex.EncodeToString(entry.tle.GetInclusionProof().GetRootHash()) + logEntry := models.LogEntryAnon{ + IntegratedTime: &entry.tle.IntegratedTime, + LogID: conv.Pointer(string(entry.tle.GetLogId().KeyId)), + LogIndex: conv.Pointer(entry.tle.GetInclusionProof().GetLogIndex()), + Body: base64.StdEncoding.EncodeToString(entry.tle.GetCanonicalizedBody()), + Verification: &models.LogEntryAnonVerification{ + InclusionProof: &models.InclusionProof{ + Checkpoint: conv.Pointer(entry.tle.GetInclusionProof().GetCheckpoint().GetEnvelope()), + Hashes: hashes, + LogIndex: conv.Pointer(entry.tle.GetInclusionProof().GetLogIndex()), + RootHash: &rootHash, + TreeSize: conv.Pointer(entry.tle.GetInclusionProof().GetTreeSize()), + }, + SignedEntryTimestamp: strfmt.Base64(entry.signedEntryTimestamp), + }, + } + err := rekorVerify.VerifyInclusion(context.Background(), &logEntry) + if err != nil { + return err + } + + err = rekorVerify.VerifyCheckpointSignature(&logEntry, verifier) + if err != nil { + return err + } + + return nil +} + +// VerifyCheckpointAndInclusion verifies a checkpoint and the entry's inclusion in the transparency log. +// This function is compatible with Rekor v1 and Rekor v2. +func VerifyCheckpointAndInclusion(entry *Entry, verifier signature.Verifier, origin string) error { + noteVerifier, err := note.NewNoteVerifier(origin, verifier) + if err != nil { + return fmt.Errorf("loading note verifier: %w", err) + } + err = verify.VerifyLogEntry(entry.TransparencyLogEntry(), noteVerifier) + if err != nil { + return fmt.Errorf("verifying log entry: %w", err) + } + + return nil +} + +func VerifySET(entry *Entry, verifiers map[string]*root.TransparencyLog) error { + if entry.rekorV1Entry == nil { + return fmt.Errorf("can only verify SET for Rekor v1 entry") + } + rekorPayload := RekorPayload{ + Body: entry.Body(), + IntegratedTime: entry.tle.IntegratedTime, + LogIndex: entry.LogIndex(), + LogID: hex.EncodeToString([]byte(entry.LogKeyID())), + } + + verifier, ok := verifiers[hex.EncodeToString([]byte(entry.LogKeyID()))] + if !ok { + return errors.New("rekor log public key not found for payload") + } + if verifier.ValidityPeriodStart.IsZero() { + return errors.New("rekor validity period start time not set") + } + if (verifier.ValidityPeriodStart.After(entry.IntegratedTime())) || + (!verifier.ValidityPeriodEnd.IsZero() && verifier.ValidityPeriodEnd.Before(entry.IntegratedTime())) { + return errors.New("rekor log public key not valid at payload integrated time") + } + + contents, err := json.Marshal(rekorPayload) + if err != nil { + return fmt.Errorf("marshaling: %w", err) + } + canonicalized, err := jsoncanonicalizer.Transform(contents) + if err != nil { + return fmt.Errorf("canonicalizing: %w", err) + } + + hash := sha256.Sum256(canonicalized) + if ecdsaPublicKey, ok := verifier.PublicKey.(*ecdsa.PublicKey); !ok { + return fmt.Errorf("unsupported public key type: %T", verifier.PublicKey) + } else if !ecdsa.VerifyASN1(ecdsaPublicKey, hash[:], entry.signedEntryTimestamp) { + return errors.New("unable to verify SET") + } + return nil +} + +func unmarshalRekorV1Entry(body []byte) (types.EntryImpl, error) { + pe, err := models.UnmarshalProposedEntry(bytes.NewReader(body), runtime.JSONConsumer()) + if err != nil { + return nil, err + } + rekorEntry, err := 
types.UnmarshalEntry(pe) + if err != nil { + return nil, err + } + return rekorEntry, nil +} + +func unmarshalRekorV2Entry(body []byte) (*rekortilespb.Entry, error) { + logEntryBody := rekortilespb.Entry{} + err := protojson.Unmarshal(body, &logEntryBody) + if err != nil { + return nil, ErrInvalidRekorV2Entry + } + return &logEntryBody, nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go new file mode 100644 index 00000000000..e2e76d69074 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go @@ -0,0 +1,211 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tuf + +import ( + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/theupdateframework/go-tuf/v2/metadata/config" + "github.com/theupdateframework/go-tuf/v2/metadata/fetcher" + "github.com/theupdateframework/go-tuf/v2/metadata/updater" + + "github.com/sigstore/sigstore-go/pkg/util" +) + +// Client is a Sigstore TUF client +type Client struct { + cfg *config.UpdaterConfig + up *updater.Updater + opts *Options +} + +// New returns a new client with custom options +func New(opts *Options) (*Client, error) { + var c = Client{ + opts: opts, + } + dir := filepath.Join(opts.CachePath, URLToPath(opts.RepositoryBaseURL)) + var err error + + if c.cfg, err = config.New(opts.RepositoryBaseURL, opts.Root); err != nil { + return nil, fmt.Errorf("failed to create TUF client: %w", err) + } + + c.cfg.LocalMetadataDir = dir + c.cfg.LocalTargetsDir = filepath.Join(dir, "targets") + c.cfg.DisableLocalCache = c.opts.DisableLocalCache + c.cfg.PrefixTargetsWithHash = !c.opts.DisableConsistentSnapshot + + if c.cfg.DisableLocalCache { + c.opts.CachePath = "" + c.opts.CacheValidity = 0 + c.opts.ForceCache = false + } + + if opts.Fetcher != nil { + c.cfg.Fetcher = opts.Fetcher + } else { + fetcher := fetcher.NewDefaultFetcher() + fetcher.SetHTTPUserAgent(util.ConstructUserAgent()) + c.cfg.Fetcher = fetcher + } + + // Upon client creation, we may not perform a full TUF update, + // based on the cache control configuration. Start with a local + // client (only reads content on disk) and then decide if we + // must perform a full TUF update. + tmpCfg := *c.cfg + // Create a temporary config for the first use where UnsafeLocalMode + // is true. This means that when we first initialize the client, + // we are guaranteed to only read the metadata on disk. + // Based on that metadata we take a decision if a full TUF + // refresh should be done or not. As so, the tmpCfg is only needed + // here and not in future invocations. 
+	tmpCfg.UnsafeLocalMode = true
+	c.up, err = updater.New(&tmpCfg)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create initial TUF updater: %w", err)
+	}
+	if err = c.loadMetadata(); err != nil {
+		return nil, fmt.Errorf("failed to load metadata: %w", err)
+	}
+
+	return &c, nil
+}
+
+// DefaultClient returns a Sigstore TUF client for the public good instance
+func DefaultClient() (*Client, error) {
+	opts := DefaultOptions()
+
+	return New(opts)
+}
+
+// loadMetadata controls whether the client should actually perform a TUF refresh.
+// The TUF specification mandates a refresh, but for certain Sigstore clients it
+// may be beneficial to rely on the cache, and in air-gapped deployments a
+// refresh may not even be possible.
+func (c *Client) loadMetadata() error {
+	// Load the metadata into memory and verify it
+	if err := c.up.Refresh(); err != nil {
+		// this is most likely due to the lack of metadata files
+		// on disk. Perform a full update and return.
+		return c.Refresh()
+	}
+
+	if c.opts.ForceCache {
+		return nil
+	} else if c.opts.CacheValidity > 0 {
+		cfg, err := LoadConfig(c.configPath())
+		if err != nil {
+			// Config may not exist, don't error;
+			// create a new empty config
+			cfg = &Config{}
+		}
+
+		cacheValidUntil := cfg.LastTimestamp.AddDate(0, 0, c.opts.CacheValidity)
+		if time.Now().Before(cacheValidUntil) {
+			// No need to update
+			return nil
+		}
+	}
+
+	return c.Refresh()
+}
+
+func (c *Client) configPath() string {
+	var p = filepath.Join(
+		c.opts.CachePath,
+		fmt.Sprintf("%s.json", URLToPath(c.opts.RepositoryBaseURL)),
+	)
+
+	return p
+}
+
+// Refresh forces a refresh of the underlying TUF client.
+// As the tuf client updater does not support multiple refreshes during
+// its life-time, this will replace the TUF client updater with a new one.
+func (c *Client) Refresh() error {
+	var err error
+
+	c.up, err = updater.New(c.cfg)
+	if err != nil {
+		return fmt.Errorf("failed to create tuf updater: %w", err)
+	}
+	err = c.up.Refresh()
+	if err != nil {
+		return fmt.Errorf("tuf refresh failed: %w", err)
+	}
+	// If cache is disabled, we don't need to persist the last timestamp
+	if c.cfg.DisableLocalCache {
+		return nil
+	}
+	// Update config with last update
+	cfg, err := LoadConfig(c.configPath())
+	if err != nil {
+		// Likely the config file did not exist; create it
+		cfg = &Config{}
+	}
+	cfg.LastTimestamp = time.Now()
+	// ignore error writing the updated config file
+	_ = cfg.Persist(c.configPath())
+
+	return nil
+}
+
+// GetTarget returns a target file from the TUF repository
+func (c *Client) GetTarget(target string) ([]byte, error) {
+	// Set filepath to the empty string. When we get targets,
+	// we rely on the target info struct instead.
+	const filePath = ""
+	ti, err := c.up.GetTargetInfo(target)
+	if err != nil {
+		return nil, fmt.Errorf("getting info for target \"%s\": %w", target, err)
+	}
+
+	path, tb, err := c.up.FindCachedTarget(ti, filePath)
+	if err != nil {
+		return nil, fmt.Errorf("getting target cache: %w", err)
+	}
+	if path != "" {
+		// Cached version found
+		return tb, nil
+	}
+
+	// Download of target is needed
+	// Ignore targetsBaseURL, set to empty string
+	const targetsBaseURL = ""
+	_, tb, err = c.up.DownloadTarget(ti, filePath, targetsBaseURL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to download target file %s - %w", target, err)
+	}
+
+	return tb, nil
+}
+
+// URLToPath converts a URL to a filename-compatible string
+func URLToPath(url string) string {
+	// Strip scheme, replace slashes with dashes
+	// e.g.
https://github.github.com/prod-tuf-root -> github.github.com-prod-tuf-root + fn := url + fn, _ = strings.CutPrefix(fn, "https://") + fn, _ = strings.CutPrefix(fn, "http://") + fn = strings.ReplaceAll(fn, "/", "-") + fn = strings.ReplaceAll(fn, ":", "-") + + return strings.ToLower(fn) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go new file mode 100644 index 00000000000..3f5a81f1e5e --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go @@ -0,0 +1,54 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tuf + +import ( + "encoding/json" + "fmt" + "os" + "time" +) + +type Config struct { + LastTimestamp time.Time `json:"lastTimestamp"` +} + +func LoadConfig(p string) (*Config, error) { + var c Config + + b, err := os.ReadFile(p) + if err != nil { + return nil, fmt.Errorf("failed to read config: %w", err) + } + err = json.Unmarshal(b, &c) + if err != nil { + return nil, fmt.Errorf("malformed config file: %w", err) + } + + return &c, nil +} + +func (c *Config) Persist(p string) error { + b, err := json.Marshal(c) + if err != nil { + return fmt.Errorf("failed to JSON marshal config: %w", err) + } + err = os.WriteFile(p, b, 0600) + if err != nil { + return fmt.Errorf("failed to write config: %w", err) + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go new file mode 100644 index 00000000000..e3df77a55d5 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go @@ -0,0 +1,177 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tuf + +import ( + "embed" + "math" + "os" + "path/filepath" + + "github.com/sigstore/sigstore-go/pkg/util" + "github.com/theupdateframework/go-tuf/v2/metadata/fetcher" +) + +//go:embed repository +var embeddedRepo embed.FS + +const ( + DefaultMirror = "https://tuf-repo-cdn.sigstore.dev" + StagingMirror = "https://tuf-repo-cdn.sigstage.dev" + + // The following caching values can be used for the CacheValidity option + NoCache = 0 + MaxCache = math.MaxInt +) + +// Options represent the various options for a Sigstore TUF Client +type Options struct { + // CacheValidity period in days (default 0). The client will persist a + // timestamp with the cache after refresh. 
Note that the client will + // always refresh the cache if the metadata is expired or if the client is + // unable to find a persisted timestamp, so this is not an optimal control + // for air-gapped environments. Use const MaxCache to update the cache when + // the metadata is expired, though the first initialization will still + // refresh the cache. + CacheValidity int + // ForceCache controls if the cache should be used without update + // as long as the metadata is valid. Use ForceCache over CacheValidity + // if you want to always use the cache up until its expiration. Note that + // the client will refresh the cache once the metadata has expired, so this + // is not an optimal control for air-gapped environments. Clients instead + // should provide a trust root file directly to the client to bypass TUF. + ForceCache bool + // Root is the TUF trust anchor + Root []byte + // CachePath is the location on disk for TUF cache + // (default $HOME/.sigstore/tuf) + CachePath string + // RepositoryBaseURL is the TUF repository location URL + // (default https://tuf-repo-cdn.sigstore.dev) + RepositoryBaseURL string + // DisableLocalCache mode allows a client to work on a read-only + // files system if this is set, cache path is ignored. + DisableLocalCache bool + // DisableConsistentSnapshot + DisableConsistentSnapshot bool + // Fetcher is the metadata fetcher + Fetcher fetcher.Fetcher +} + +// WithCacheValidity sets the cache validity period in days +func (o *Options) WithCacheValidity(days int) *Options { + o.CacheValidity = days + return o +} + +// WithForceCache forces the client to use the cache without updating +func (o *Options) WithForceCache() *Options { + o.ForceCache = true + return o +} + +// WithRoot sets the TUF trust anchor +func (o *Options) WithRoot(root []byte) *Options { + o.Root = root + return o +} + +// WithCachePath sets the location on disk for TUF cache +func (o *Options) WithCachePath(path string) *Options { + o.CachePath = path + return o +} + +// WithRepositoryBaseURL sets the TUF repository location URL +func (o *Options) WithRepositoryBaseURL(url string) *Options { + o.RepositoryBaseURL = url + return o +} + +// WithDisableLocalCache sets the client to work on a read-only file system +func (o *Options) WithDisableLocalCache() *Options { + o.DisableLocalCache = true + return o +} + +// WithDisableConsistentSnapshot sets the client to disable consistent snapshot +func (o *Options) WithDisableConsistentSnapshot() *Options { + o.DisableConsistentSnapshot = true + return o +} + +// WithFetcher sets the metadata fetcher +func (o *Options) WithFetcher(f fetcher.Fetcher) *Options { + o.Fetcher = f + return o +} + +// DefaultOptions returns an options struct for the public good instance +func DefaultOptions() *Options { + var opts Options + var err error + + opts.Root = DefaultRoot() + home, err := os.UserHomeDir() + if err != nil { + // Fall back to using a TUF repository in the temp location + home = os.TempDir() + } + opts.CachePath = filepath.Join(home, ".sigstore", "root") + opts.RepositoryBaseURL = DefaultMirror + fetcher := fetcher.NewDefaultFetcher() + fetcher.SetHTTPUserAgent(util.ConstructUserAgent()) + opts.Fetcher = fetcher + + return &opts +} + +// DefaultRoot returns the root.json for the public good instance +func DefaultRoot() []byte { + // The embed file system always uses forward slashes as path separators, + // even on Windows + p := "repository/root.json" + + b, err := embeddedRepo.ReadFile(p) + if err != nil { + // This should never happen. 
+ // ReadFile from an embedded FS will never fail as long as + // the path is correct. If it fails, it would mean + // that the binary is not assembled as it should, and there + // is no way to recover from that. + panic(err) + } + + return b +} + +// StagingRoot returns the root.json for the staging instance +func StagingRoot() []byte { + // The embed file system always uses forward slashes as path separators, + // even on Windows + p := "repository/staging_root.json" + + b, err := embeddedRepo.ReadFile(p) + if err != nil { + // This should never happen. + // ReadFile from an embedded FS will never fail as long as + // the path is correct. If it fails, it would mean + // that the binary is not assembled as it should, and there + // is no way to recover from that. + panic(err) + } + + return b +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json new file mode 100644 index 00000000000..fe4f3ba2189 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json @@ -0,0 +1,145 @@ +{ + "signatures": [ + { + "keyid": "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3", + "sig": "" + }, + { + "keyid": "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", + "sig": "3045022100bbddd464f8066ceb88ba787375c12cd6330680e08c2910703e6538c71cc79ad202205190b06e4537fe961b3ef81fe68edcd0089c19f919afed423b9aafd700641153" + }, + { + "keyid": "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", + "sig": "3044022069306cd5257f732a740c1afe60a8e433c5de58eafeadbe99c336c9c71d198cf802200d773953ae7dbc48d3e5bad9a6f64bafff196b7e2ad4a52a19519367d47dc042" + }, + { + "keyid": "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", + "sig": "304402204d21a2ec80df66e61f6fe2912951dc47df836036f8c0ab10816d375e71dbf79e0220547adce1afdf04e6794efa203dd5264c6f7e0ef78e57fe934b0d26cb994eec76" + }, + { + "keyid": "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70", + "sig": "3045022060826496557144eb1649893ed5f6f4ea54536feb0ca82f8b89ae641be39743e5022100ad7118b5e9d4837326206e412fc6da2999925d110328a7c166b06c624336c93f" + }, + { + "keyid": "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632", + "sig": "3046022100d8179439c2e73eb0c1733abee7faf832dcaea7263edcb4919891c3a247f05923022100e1a437e0797e803f9b72dc9d2d92155b0a2270c24efdd5f4b3a5d8f0b0f431a7" + } + ], + "signed": { + "_type": "root", + "consistent_snapshot": true, + "expires": "2026-01-22T13:05:59Z", + "keys": { + "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-online-uri": "gcpkms:projects/sigstore-root-signing/locations/global/keyRings/root/cryptoKeys/timestamp/cryptoKeyVersions/1" + }, + "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEMxpPOJCIZ5otG4106fGJseEQi3V9\npkMYQ4uyV9Tj1M7WHXIyLG+jkfvuG0glQ1JZbRZZBV3gAR4sojdGHISeow==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@lance" + }, + 
"22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@santiagotorres" + }, + "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@bobcallaway" + }, + "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@joshuagl" + }, + "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@mnm678" + } + }, + "roles": { + "root": { + "keyids": [ + "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", + "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", + "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", + "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70", + "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632" + ], + "threshold": 3 + }, + "snapshot": { + "keyids": [ + "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 3650, + "x-tuf-on-ci-signing-period": 365 + }, + "targets": { + "keyids": [ + "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", + "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", + "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", + "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70", + "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632" + ], + "threshold": 3 + }, + "timestamp": { + "keyids": [ + "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 7, + "x-tuf-on-ci-signing-period": 6 + } + }, + "spec_version": "1.0", + "version": 13, + "x-tuf-on-ci-expiry-period": 197, + "x-tuf-on-ci-signing-period": 46 + } +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json new file mode 100644 index 00000000000..2a06edad088 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json @@ -0,0 +1,107 @@ +{ + "signatures": [ + { + "keyid": 
"aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81", + "sig": "3046022100fe72afdbab1bef70c6f461f39f5e75cf543e5277648bfab798a108a0f76f0ca002210098e1e1804b7a13bab42c063691864d85fc4bf6f5a875346b388be00f139c6118" + }, + { + "keyid": "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc", + "sig": "304502210094423ead9a7d546d703f649b408441688eb30f3279fb065b28eea05d2b36843102206f21fa2888836485964c7cb7468a16ddb6297784c50cdba03888578d7b46e0c7" + }, + { + "keyid": "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237", + "sig": "" + }, + { + "keyid": "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5", + "sig": "" + } + ], + "signed": { + "_type": "root", + "consistent_snapshot": true, + "expires": "2025-12-26T13:27:03Z", + "keys": { + "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEoxkvDOmtGEknB3M+ZkPts8joDM0X\nIH5JZwPlgC2CXs/eqOuNF8AcEWwGYRiDhV/IMlQw5bg8PLICQcgsbrDiKg==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@mnm678" + }, + "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE++Wv+DcLRk+mfkmlpCwl1GUi9EMh\npBUTz8K0fH7bE4mQuViGSyWA/eyMc0HvzZi6Xr0diHw0/lUPBvok214YQw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@kommendorkapten" + }, + "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFHDb85JH+JYR1LQmxiz4UMokVMnP\nxKoWpaEnFCKXH8W4Fc/DfIxMnkpjCuvWUBdJXkO0aDIxwsij8TOFh2R7dw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@joshuagl" + }, + "aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEohqIdE+yTl4OxpX8ZxNUPrg3SL9H\nBDnhZuceKkxy2oMhUOxhWweZeG3bfM1T4ZLnJimC6CAYVU5+F5jZCoftRw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@jku" + }, + "c3479007e861445ce5dc109d9661ed77b35bbc0e3f161852c46114266fc2daa4": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAExxmEtmhF5U+i+v/6he4BcSLzCgMx\n/0qSrvDg6bUWwUrkSKS2vDpcJrhGy5fmmhRrGawjPp1ALpC3y1kqFTpXDg==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-online-uri": "gcpkms:projects/projectsigstore-staging/locations/global/keyRings/tuf-keyring/cryptoKeys/tuf-key/cryptoKeyVersions/2" + } + }, + "roles": { + "root": { + "keyids": [ + "aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81", + "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc", + "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237", + "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5" + ], + "threshold": 2 + }, + "snapshot": { + "keyids": [ + "c3479007e861445ce5dc109d9661ed77b35bbc0e3f161852c46114266fc2daa4" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 3650, + "x-tuf-on-ci-signing-period": 365 + }, + "targets": { + "keyids": [ + "aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81", + "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc", 
+ "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237", + "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5" + ], + "threshold": 1 + }, + "timestamp": { + "keyids": [ + "c3479007e861445ce5dc109d9661ed77b35bbc0e3f161852c46114266fc2daa4" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 7, + "x-tuf-on-ci-signing-period": 6 + } + }, + "spec_version": "1.0", + "version": 12, + "x-tuf-on-ci-expiry-period": 182, + "x-tuf-on-ci-signing-period": 35 + } +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/util/util.go b/vendor/github.com/sigstore/sigstore-go/pkg/util/util.go new file mode 100644 index 00000000000..e5a1b088c18 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/util/util.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util //nolint:revive + +import ( + "runtime/debug" +) + +func ConstructUserAgent() string { + userAgent := "sigstore-go" + + buildInfo, ok := debug.ReadBuildInfo() + if !ok { + return userAgent + } + + for _, eachDep := range buildInfo.Deps { + if eachDep.Path == "github.com/sigstore/sigstore-go" { + userAgent += "/" + userAgent += eachDep.Version + } + } + + return userAgent +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go new file mode 100644 index 00000000000..e33d915a8d8 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go @@ -0,0 +1,34 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
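The staging root embedded above pairs with the StagingMirror constant and the fluent Options setters from pkg/tuf/options.go. A hedged sketch (not part of the patch) of pointing a client at the staging instance:

package main

import (
	"log"

	"github.com/sigstore/sigstore-go/pkg/tuf"
)

func main() {
	// Start from the public-good defaults, then swap in the embedded
	// staging root and the staging mirror via the With* setters.
	opts := tuf.DefaultOptions().
		WithRoot(tuf.StagingRoot()).
		WithRepositoryBaseURL(tuf.StagingMirror)

	c, err := tuf.New(opts)
	if err != nil {
		log.Fatal(err)
	}
	_ = c // call c.GetTarget(...) as needed
}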
+ +package verify + +import ( + "crypto/x509" + "errors" + "time" + + "github.com/sigstore/sigstore-go/pkg/root" +) + +func VerifyLeafCertificate(observerTimestamp time.Time, leafCert *x509.Certificate, trustedMaterial root.TrustedMaterial) ([][]*x509.Certificate, error) { // nolint: revive + for _, ca := range trustedMaterial.FulcioCertificateAuthorities() { + chains, err := ca.Verify(leafCert, observerTimestamp) + if err == nil { + return chains, nil + } + } + + return nil, errors.New("leaf certificate verification failed") +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go new file mode 100644 index 00000000000..1d3bad1a714 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go @@ -0,0 +1,217 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + + "github.com/sigstore/sigstore-go/pkg/fulcio/certificate" +) + +type SubjectAlternativeNameMatcher struct { + SubjectAlternativeName string `json:"subjectAlternativeName"` + Regexp regexp.Regexp `json:"regexp,omitempty"` +} + +type IssuerMatcher struct { + Issuer string `json:"issuer"` + Regexp regexp.Regexp `json:"regexp,omitempty"` +} + +type CertificateIdentity struct { + SubjectAlternativeName SubjectAlternativeNameMatcher `json:"subjectAlternativeName"` + Issuer IssuerMatcher `json:"issuer"` + certificate.Extensions +} + +type CertificateIdentities []CertificateIdentity + +type ErrValueMismatch struct { + object string + expected string + actual string +} + +func (e *ErrValueMismatch) Error() string { + return fmt.Sprintf("expected %s value \"%s\", got \"%s\"", e.object, e.expected, e.actual) +} + +type ErrValueRegexMismatch struct { + object string + regex string + value string +} + +func (e *ErrValueRegexMismatch) Error() string { + return fmt.Sprintf("expected %s value to match regex \"%s\", got \"%s\"", e.object, e.regex, e.value) +} + +type ErrNoMatchingCertificateIdentity struct { + errors []error +} + +func (e *ErrNoMatchingCertificateIdentity) Error() string { + if len(e.errors) > 0 { + return fmt.Sprintf("no matching CertificateIdentity found, last error: %v", e.errors[len(e.errors)-1]) + } + return "no matching CertificateIdentity found" +} + +func (e *ErrNoMatchingCertificateIdentity) Unwrap() []error { + return e.errors +} + +// NewSANMatcher provides an easier way to create a SubjectAlternativeNameMatcher. +// If the regexpStr fails to compile into a Regexp, an error is returned. +func NewSANMatcher(sanValue string, regexpStr string) (SubjectAlternativeNameMatcher, error) { + r, err := regexp.Compile(regexpStr) + if err != nil { + return SubjectAlternativeNameMatcher{}, err + } + + return SubjectAlternativeNameMatcher{ + SubjectAlternativeName: sanValue, + Regexp: *r}, nil +} + +// The default Regexp json marshal is quite ugly, so we override it here. 
+func (s *SubjectAlternativeNameMatcher) MarshalJSON() ([]byte, error) {
+	return json.Marshal(&struct {
+		SubjectAlternativeName string `json:"subjectAlternativeName"`
+		Regexp                 string `json:"regexp,omitempty"`
+	}{
+		SubjectAlternativeName: s.SubjectAlternativeName,
+		Regexp:                 s.Regexp.String(),
+	})
+}
+
+// Verify checks if the actualCert matches the SANMatcher's Value and
+// Regexp – if those values have been provided.
+func (s SubjectAlternativeNameMatcher) Verify(actualCert certificate.Summary) error {
+	if s.SubjectAlternativeName != "" &&
+		actualCert.SubjectAlternativeName != s.SubjectAlternativeName {
+		return &ErrValueMismatch{"SAN", string(s.SubjectAlternativeName), string(actualCert.SubjectAlternativeName)}
+	}
+
+	if s.Regexp.String() != "" &&
+		!s.Regexp.MatchString(actualCert.SubjectAlternativeName) {
+		return &ErrValueRegexMismatch{"SAN", string(s.Regexp.String()), string(actualCert.SubjectAlternativeName)}
+	}
+	return nil
+}
+
+func NewIssuerMatcher(issuerValue, regexpStr string) (IssuerMatcher, error) {
+	r, err := regexp.Compile(regexpStr)
+	if err != nil {
+		return IssuerMatcher{}, err
+	}
+
+	return IssuerMatcher{Issuer: issuerValue, Regexp: *r}, nil
+}
+
+func (i *IssuerMatcher) MarshalJSON() ([]byte, error) {
+	return json.Marshal(&struct {
+		Issuer string `json:"issuer"`
+		Regexp string `json:"regexp,omitempty"`
+	}{
+		Issuer: i.Issuer,
+		Regexp: i.Regexp.String(),
+	})
+}
+
+func (i IssuerMatcher) Verify(actualCert certificate.Summary) error {
+	if i.Issuer != "" &&
+		actualCert.Issuer != i.Issuer {
+		return &ErrValueMismatch{"issuer", string(i.Issuer), string(actualCert.Issuer)}
+	}
+
+	if i.Regexp.String() != "" &&
+		!i.Regexp.MatchString(actualCert.Issuer) {
+		return &ErrValueRegexMismatch{"issuer", string(i.Regexp.String()), string(actualCert.Issuer)}
+	}
+	return nil
+}
+
+func NewCertificateIdentity(sanMatcher SubjectAlternativeNameMatcher, issuerMatcher IssuerMatcher, extensions certificate.Extensions) (CertificateIdentity, error) {
+	if sanMatcher.SubjectAlternativeName == "" && sanMatcher.Regexp.String() == "" {
+		return CertificateIdentity{}, errors.New("when verifying a certificate identity, there must be subject alternative name criteria")
+	}
+
+	if issuerMatcher.Issuer == "" && issuerMatcher.Regexp.String() == "" {
+		return CertificateIdentity{}, errors.New("when verifying a certificate identity, must specify Issuer criteria")
+	}
+
+	if extensions.Issuer != "" {
+		return CertificateIdentity{}, errors.New("please specify issuer in IssuerMatcher, not Extensions")
+	}
+
+	certID := CertificateIdentity{
+		SubjectAlternativeName: sanMatcher,
+		Issuer:                 issuerMatcher,
+		Extensions:             extensions,
+	}
+
+	return certID, nil
+}
+
+// NewShortCertificateIdentity provides a more convenient way of initializing
+// a CertificateIdentity with a SAN and the Issuer OID extension. If you need
+// to check more OID extensions, use NewCertificateIdentity instead.
+func NewShortCertificateIdentity(issuer, issuerRegex, sanValue, sanRegex string) (CertificateIdentity, error) {
+	sanMatcher, err := NewSANMatcher(sanValue, sanRegex)
+	if err != nil {
+		return CertificateIdentity{}, err
+	}
+
+	issuerMatcher, err := NewIssuerMatcher(issuer, issuerRegex)
+	if err != nil {
+		return CertificateIdentity{}, err
+	}
+
+	return NewCertificateIdentity(sanMatcher, issuerMatcher, certificate.Extensions{})
+}
+
+// Verify verifies the CertificateIdentities, and if ANY of them match the cert,
+// it returns the CertificateIdentity that matched. If none match, it returns an
+// error.
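A hedged sketch of how these matchers compose, using NewShortCertificateIdentity and the CertificateIdentities.Verify method that follows; the issuer URL and SAN regexp are illustrative values only:

package main

import (
	"log"

	"github.com/sigstore/sigstore-go/pkg/fulcio/certificate"
	"github.com/sigstore/sigstore-go/pkg/verify"
)

func main() {
	// Exact issuer match plus a regexp over the SAN. Empty strings disable
	// a criterion, but each matcher needs at least one of its two fields.
	ci, err := verify.NewShortCertificateIdentity(
		"https://token.actions.githubusercontent.com", // issuer (illustrative)
		"", // no issuer regexp
		"", // no exact SAN
		`^https://github\.com/acme/`, // SAN regexp (illustrative)
	)
	if err != nil {
		log.Fatal(err)
	}

	// In real code the Summary would be extracted from a verified leaf cert.
	var summary certificate.Summary

	matched, err := verify.CertificateIdentities{ci}.Verify(summary)
	if err != nil {
		log.Fatal(err) // a zero Summary will not match; shown for shape only
	}
	log.Printf("matched identity: %+v", matched)
}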
+func (i CertificateIdentities) Verify(cert certificate.Summary) (*CertificateIdentity, error) { + multierr := &ErrNoMatchingCertificateIdentity{} + var err error + for _, ci := range i { + if err = ci.Verify(cert); err == nil { + return &ci, nil + } + multierr.errors = append(multierr.errors, err) + } + return nil, multierr +} + +// Verify checks if the actualCert matches the CertificateIdentity's SAN and +// any of the provided OID extension values. Any empty values are ignored. +func (c CertificateIdentity) Verify(actualCert certificate.Summary) error { + var err error + if err = c.SubjectAlternativeName.Verify(actualCert); err != nil { + return err + } + + if err = c.Issuer.Verify(actualCert); err != nil { + return err + } + + return certificate.CompareExtensions(c.Extensions, actualCert.Extensions) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go new file mode 100644 index 00000000000..18263c09833 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go @@ -0,0 +1,39 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "fmt" +) + +type ErrVerification struct { + err error +} + +func NewVerificationError(e error) ErrVerification { + return ErrVerification{e} +} + +func (e ErrVerification) Unwrap() error { + return e.err +} + +func (e ErrVerification) String() string { + return fmt.Sprintf("verification error: %s", e.err.Error()) +} + +func (e ErrVerification) Error() string { + return e.String() +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go new file mode 100644 index 00000000000..85bcb50284d --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go @@ -0,0 +1,130 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
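ErrVerification above is a thin wrapper that still exposes its cause via Unwrap. A minimal sketch (not part of the patch; the sentinel error is illustrative) showing that errors.Is can reach the wrapped cause:

package main

import (
	"errors"
	"fmt"

	"github.com/sigstore/sigstore-go/pkg/verify"
)

// errBadSignature is an illustrative sentinel, not part of the package.
var errBadSignature = errors.New("bad signature")

func main() {
	err := verify.NewVerificationError(errBadSignature)
	fmt.Println(err)                             // verification error: bad signature
	fmt.Println(errors.Is(err, errBadSignature)) // true, via Unwrap
}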
+ +package verify + +import ( + "crypto/x509" + "errors" + "time" + + in_toto "github.com/in-toto/attestation/go/v1" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/tlog" +) + +var errNotImplemented = errors.New("not implemented") + +type HasInclusionPromise interface { + HasInclusionPromise() bool +} + +type HasInclusionProof interface { + HasInclusionProof() bool +} + +type SignatureProvider interface { + SignatureContent() (SignatureContent, error) +} + +type SignedTimestampProvider interface { + Timestamps() ([][]byte, error) +} + +type TlogEntryProvider interface { + TlogEntries() ([]*tlog.Entry, error) +} + +type VerificationProvider interface { + VerificationContent() (VerificationContent, error) +} + +type VersionProvider interface { + Version() (string, error) +} + +type SignedEntity interface { + HasInclusionPromise + HasInclusionProof + SignatureProvider + SignedTimestampProvider + TlogEntryProvider + VerificationProvider + VersionProvider +} + +type VerificationContent interface { + CompareKey(any, root.TrustedMaterial) bool + ValidAtTime(time.Time, root.TrustedMaterial) bool + Certificate() *x509.Certificate + PublicKey() PublicKeyProvider +} + +type SignatureContent interface { + Signature() []byte + EnvelopeContent() EnvelopeContent + MessageSignatureContent() MessageSignatureContent +} + +type PublicKeyProvider interface { + Hint() string +} + +type MessageSignatureContent interface { + Digest() []byte + DigestAlgorithm() string + Signature() []byte +} + +type EnvelopeContent interface { + RawEnvelope() *dsse.Envelope + Statement() (*in_toto.Statement, error) +} + +// BaseSignedEntity is a helper struct that implements all the interfaces +// of SignedEntity. It can be embedded in a struct to implement the SignedEntity +// interface. This may be useful for testing, or for implementing a SignedEntity +// that only implements a subset of the interfaces. +type BaseSignedEntity struct{} + +var _ SignedEntity = &BaseSignedEntity{} + +func (b *BaseSignedEntity) HasInclusionPromise() bool { + return false +} + +func (b *BaseSignedEntity) HasInclusionProof() bool { + return false +} + +func (b *BaseSignedEntity) VerificationContent() (VerificationContent, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) SignatureContent() (SignatureContent, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) Timestamps() ([][]byte, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) TlogEntries() ([]*tlog.Entry, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) Version() (string, error) { + return "", errNotImplemented +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go new file mode 100644 index 00000000000..bf447c28c39 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go @@ -0,0 +1,100 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "crypto/x509" + "encoding/hex" + "errors" + "fmt" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/ctutil" + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509util" + "github.com/sigstore/sigstore-go/pkg/root" +) + +// VerifySignedCertificateTimestamp, given a threshold, TrustedMaterial, and a +// leaf certificate, will extract SCTs from the leaf certificate and verify the +// timestamps using the TrustedMaterial's FulcioCertificateAuthorities() and +// CTLogs() +func VerifySignedCertificateTimestamp(chains [][]*x509.Certificate, threshold int, trustedMaterial root.TrustedMaterial) error { // nolint: revive + if len(chains) == 0 || len(chains[0]) == 0 || chains[0][0] == nil { + return errors.New("no chains provided") + } + // The first certificate in the chain is always the leaf certificate + leaf := chains[0][0] + + ctlogs := trustedMaterial.CTLogs() + + scts, err := x509util.ParseSCTsFromCertificate(leaf.Raw) + if err != nil { + return err + } + + leafCTCert, err := ctx509.ParseCertificates(leaf.Raw) + if err != nil { + return err + } + + verified := 0 + for _, sct := range scts { + encodedKeyID := hex.EncodeToString(sct.LogID.KeyID[:]) + key, ok := ctlogs[encodedKeyID] + if !ok { + // skip entries the trust root cannot verify + continue + } + + // Ensure sct is within ctlog validity window + sctTime := ct.TimestampToTime(sct.Timestamp) + if !key.ValidityPeriodStart.IsZero() && sctTime.Before(key.ValidityPeriodStart) { + // skip entries that were before ctlog key start time + continue + } + if !key.ValidityPeriodEnd.IsZero() && sctTime.After(key.ValidityPeriodEnd) { + // skip entries that were after ctlog key end time + continue + } + + for _, chain := range chains { + fulcioChain := make([]*ctx509.Certificate, len(leafCTCert)) + copy(fulcioChain, leafCTCert) + + if len(chain) < 2 { + continue + } + parentCert := chain[1].Raw + + fulcioIssuer, err := ctx509.ParseCertificates(parentCert) + if err != nil { + continue + } + fulcioChain = append(fulcioChain, fulcioIssuer...) + + err = ctutil.VerifySCT(key.PublicKey, fulcioChain, sct, true) + if err == nil { + verified++ + } + } + } + + if verified < threshold { + return fmt.Errorf("only able to verify %d SCT entries; unable to meet threshold of %d", verified, threshold) + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go new file mode 100644 index 00000000000..0386a98e18c --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go @@ -0,0 +1,519 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
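A sketch of how the SCT check above pairs with VerifyLeafCertificate from certificate.go: the chains built there feed the threshold check here. The helper name is hypothetical, and acquisition of the leaf certificate and trusted material is elided:

package example

import (
	"crypto/x509"
	"time"

	"github.com/sigstore/sigstore-go/pkg/root"
	"github.com/sigstore/sigstore-go/pkg/verify"
)

// checkLeafAndSCTs is a hypothetical helper, not part of the vendored API.
func checkLeafAndSCTs(leaf *x509.Certificate, tm root.TrustedMaterial, observer time.Time) error {
	// Build the Fulcio chains for the leaf at the observer timestamp.
	chains, err := verify.VerifyLeafCertificate(observer, leaf, tm)
	if err != nil {
		return err
	}
	// Require at least one SCT verifiable against the trusted CT logs.
	return verify.VerifySignedCertificateTimestamp(chains, 1, tm)
}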
+
+package verify
+
+import (
+	"bytes"
+	"context"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/x509"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"os"
+	"slices"
+
+	in_toto "github.com/in-toto/attestation/go/v1"
+	"github.com/secure-systems-lab/go-securesystemslib/dsse"
+	v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+	"github.com/sigstore/sigstore-go/pkg/root"
+	"github.com/sigstore/sigstore/pkg/signature"
+	sigdsse "github.com/sigstore/sigstore/pkg/signature/dsse"
+	"github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+const maxAllowedSubjects = 1024
+const maxAllowedSubjectDigests = 32
+
+var ErrDSSEInvalidSignatureCount = errors.New("exactly one signature is required")
+
+func VerifySignature(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial) error { // nolint: revive
+	verifier, err := getSignatureVerifier(sigContent, verificationContent, trustedMaterial, false)
+	if err != nil {
+		return fmt.Errorf("could not load signature verifier: %w", err)
+	}
+
+	return verifySignatureWithVerifier(verifier, sigContent, verificationContent, trustedMaterial)
+}
+
+func verifySignatureWithVerifier(verifier signature.Verifier, sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial) error { // nolint: revive
+	if envelope := sigContent.EnvelopeContent(); envelope != nil {
+		return verifyEnvelope(verifier, envelope)
+	} else if msg := sigContent.MessageSignatureContent(); msg != nil {
+		return errors.New("artifact must be provided to verify message signature")
+	}
+
+	// handle an invalid signature content message
+	return fmt.Errorf("signature content has neither an envelope nor a message")
+}
+
+func VerifySignatureWithArtifacts(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, artifacts []io.Reader) error { // nolint: revive
+	verifier, err := getSignatureVerifier(sigContent, verificationContent, trustedMaterial, false)
+	if err != nil {
+		return fmt.Errorf("could not load signature verifier: %w", err)
+	}
+	return verifySignatureWithVerifierAndArtifacts(verifier, sigContent, verificationContent, trustedMaterial, artifacts)
+}
+
+func verifySignatureWithVerifierAndArtifacts(verifier signature.Verifier, sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, artifacts []io.Reader) error { // nolint: revive
+	envelope := sigContent.EnvelopeContent()
+	msg := sigContent.MessageSignatureContent()
+	if envelope == nil && msg == nil {
+		return fmt.Errorf("signature content has neither an envelope nor a message")
+	}
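A sketch of calling the exported entry point above against a local file; the helper and the file path are illustrative, and obtaining the signature and verification content from a parsed bundle is elided:

package example

import (
	"io"
	"os"

	"github.com/sigstore/sigstore-go/pkg/root"
	"github.com/sigstore/sigstore-go/pkg/verify"
)

// verifyAgainstFile is a hypothetical helper; sigContent, verContent and tm
// would come from a parsed bundle and a trusted root in real code.
func verifyAgainstFile(sigContent verify.SignatureContent, verContent verify.VerificationContent, tm root.TrustedMaterial, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	// A message signature takes exactly one artifact; a DSSE envelope may
	// cover several subjects, hence the slice of readers.
	return verify.VerifySignatureWithArtifacts(sigContent, verContent, tm, []io.Reader{f})
}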
+	// If there is only one artifact and no envelope,
+	// attempt to verify the message signature with the artifact.
+	if envelope == nil {
+		if len(artifacts) != 1 {
+			return fmt.Errorf("only one artifact can be verified with a message signature")
+		}
+		return verifyMessageSignature(verifier, msg, artifacts[0])
+	}
+
+	// Otherwise, verify the envelope with the provided artifacts
+	return verifyEnvelopeWithArtifacts(verifier, envelope, artifacts)
+}
+
+func VerifySignatureWithArtifactDigests(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, digests []ArtifactDigest) error { // nolint: revive
+	verifier, err := getSignatureVerifier(sigContent, verificationContent, trustedMaterial, false)
+	if err != nil {
+		return fmt.Errorf("could not load signature verifier: %w", err)
+	}
+	return verifySignatureWithVerifierAndArtifactDigests(verifier, sigContent, verificationContent, trustedMaterial, digests)
+}
+
+func verifySignatureWithVerifierAndArtifactDigests(verifier signature.Verifier, sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, digests []ArtifactDigest) error { // nolint: revive
+	envelope := sigContent.EnvelopeContent()
+	msg := sigContent.MessageSignatureContent()
+	if envelope == nil && msg == nil {
+		return fmt.Errorf("signature content has neither an envelope nor a message")
+	}
+	// If there is only one artifact and no envelope,
+	// attempt to verify the message signature with the artifact.
+	if envelope == nil {
+		if len(digests) != 1 {
+			return fmt.Errorf("only one artifact can be verified with a message signature")
+		}
+		return verifyMessageSignatureWithArtifactDigest(verifier, msg, digests[0].Digest)
+	}
+
+	return verifyEnvelopeWithArtifactDigests(verifier, envelope, digests)
+}
+
+// compatVerifier is a signature.Verifier that tries multiple verifiers
+// and returns nil if any of them verify the signature. This is used to
+// verify signatures that were generated with old clients that used SHA256
+// for ECDSA P384/P521 keys.
+type compatVerifier struct {
+	verifiers []signature.Verifier
+}
+
+func (v *compatVerifier) VerifySignature(signature, message io.Reader, opts ...signature.VerifyOption) error {
+	// Create a buffer to store the signature bytes
+	sigBuf := &bytes.Buffer{}
+	sigTee := io.TeeReader(signature, sigBuf)
+	sigBytes, err := io.ReadAll(sigTee)
+	if err != nil {
+		return fmt.Errorf("failed to read signature: %w", err)
+	}
+
+	// Create a buffer to store the message bytes
+	msgBuf := &bytes.Buffer{}
+	msgTee := io.TeeReader(message, msgBuf)
+	msgBytes, err := io.ReadAll(msgTee)
+	if err != nil {
+		return fmt.Errorf("failed to read message: %w", err)
+	}
+
+	for idx, verifier := range v.verifiers {
+		if idx != 0 {
+			fmt.Fprint(os.Stderr, "Failed to verify signature with default verifier, trying compatibility verifier\n")
+		}
+		err := verifier.VerifySignature(bytes.NewReader(sigBytes), bytes.NewReader(msgBytes), opts...)
+		if err == nil {
+			return nil
+		}
+	}
+	return fmt.Errorf("no compatible verifier found")
+}
+
+func (v *compatVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) {
+	return v.verifiers[0].PublicKey(opts...)
+}
+
+func compatSignatureVerifier(leafCert *x509.Certificate, enableCompat bool, isDSSE bool) (signature.Verifier, error) {
+	// LoadDefaultSigner/Verifier functions accept a few options to select
+	// the default signer/verifier when there are ambiguities, like for
+	// ED25519 keys, which could be used with PureEd25519 or Ed25519ph.
+ // + // When dealing with DSSE, use ED25519, but when we are working with + // hashedrekord entries, use ED25519ph by default, because this is the + // only option. + var defaultOpts []signature.LoadOption + if !isDSSE { + defaultOpts = []signature.LoadOption{options.WithED25519ph()} + } + + verifiers := make([]signature.Verifier, 0) + verifier, err := signature.LoadDefaultVerifier(leafCert.PublicKey, defaultOpts...) + if err != nil { + return nil, err + } + // If compatibility is not enabled, return only the default verifier + if !enableCompat { + return verifier, nil + } + verifiers = append(verifiers, verifier) + + // Add a compatibility verifier for ECDSA P384/P521, because we still want + // to verify signatures generated with old clients that used SHA256 + var algorithmDetails signature.AlgorithmDetails + if pubKey, ok := leafCert.PublicKey.(*ecdsa.PublicKey); ok { + switch pubKey.Curve { + case elliptic.P384(): + //nolint:staticcheck // Need to use deprecated field for backwards compatibility + algorithmDetails, err = signature.GetAlgorithmDetails(v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_256) + case elliptic.P521(): + //nolint:staticcheck // Need to use deprecated field for backwards compatibility + algorithmDetails, err = signature.GetAlgorithmDetails(v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_256) + default: + return verifier, nil + } + if err != nil { + return nil, err + } + verifier, err = signature.LoadVerifierFromAlgorithmDetails(leafCert.PublicKey, algorithmDetails, defaultOpts...) + } + if err != nil { + return nil, err + } + verifiers = append(verifiers, verifier) + return &compatVerifier{verifiers: verifiers}, nil +} + +func getSignatureVerifier(sigContent SignatureContent, verificationContent VerificationContent, tm root.TrustedMaterial, enableCompat bool) (signature.Verifier, error) { + if leafCert := verificationContent.Certificate(); leafCert != nil { + isDSSE := sigContent.EnvelopeContent() != nil + return compatSignatureVerifier(leafCert, enableCompat, isDSSE) + } else if pk := verificationContent.PublicKey(); pk != nil { + return tm.PublicKeyVerifier(pk.Hint()) + } + + return nil, fmt.Errorf("no public key or certificate found") +} + +func verifyEnvelope(verifier signature.Verifier, envelope EnvelopeContent) error { + dsseEnv := envelope.RawEnvelope() + + // A DSSE envelope in a Sigstore bundle MUST only contain one + // signature, even though DSSE is more permissive. 
+ if len(dsseEnv.Signatures) != 1 { + return ErrDSSEInvalidSignatureCount + } + pub, err := verifier.PublicKey() + if err != nil { + return fmt.Errorf("could not fetch verifier public key: %w", err) + } + envVerifier, err := dsse.NewEnvelopeVerifier(&sigdsse.VerifierAdapter{ + SignatureVerifier: verifier, + Pub: pub, + }) + + if err != nil { + return fmt.Errorf("could not load envelope verifier: %w", err) + } + + _, err = envVerifier.Verify(context.Background(), dsseEnv) + if err != nil { + return fmt.Errorf("could not verify envelope: %w", err) + } + + return nil +} + +func verifyEnvelopeWithArtifacts(verifier signature.Verifier, envelope EnvelopeContent, artifacts []io.Reader) error { + if err := verifyEnvelope(verifier, envelope); err != nil { + return err + } + statement, err := envelope.Statement() + if err != nil { + return fmt.Errorf("could not verify artifact: unable to extract statement from envelope: %w", err) + } + if err = limitSubjects(statement); err != nil { + return err + } + // Sanity check (no subjects) + if len(statement.Subject) == 0 { + return errors.New("no subjects found in statement") + } + + // determine which hash functions to use + hashFuncs, err := getHashFunctions(statement) + if err != nil { + return fmt.Errorf("unable to determine hash functions: %w", err) + } + + hashedArtifacts := make([]map[crypto.Hash][]byte, len(artifacts)) + for i, artifact := range artifacts { + // Compute digest of the artifact. + hasher, err := newMultihasher(hashFuncs) + if err != nil { + return fmt.Errorf("could not verify artifact: unable to create hasher: %w", err) + } + if _, err = io.Copy(hasher, artifact); err != nil { + return fmt.Errorf("could not verify artifact: unable to calculate digest: %w", err) + } + hashedArtifacts[i] = hasher.Sum(nil) + } + + // create a map based on the digests present in the statement + // the map key is the hash algorithm and the field is a slice of digests + // created using that hash algorithm + subjectDigests := make(map[crypto.Hash][][]byte) + for _, subject := range statement.Subject { + for alg, hexdigest := range subject.Digest { + hf, err := algStringToHashFunc(alg) + if err != nil { + continue + } + if _, ok := subjectDigests[hf]; !ok { + subjectDigests[hf] = make([][]byte, 0) + } + digest, err := hex.DecodeString(hexdigest) + if err != nil { + continue + } + subjectDigests[hf] = append(subjectDigests[hf], digest) + } + } + + // now loop over the provided artifact digests and try to compare them + // to the mapped subject digests + // if we cannot find a match, exit with an error + for _, ha := range hashedArtifacts { + matchFound := false + for key, value := range ha { + statementDigests, ok := subjectDigests[key] + if !ok { + return fmt.Errorf("no matching artifact hash algorithm found in subject digests") + } + if ok := isDigestInSlice(value, statementDigests); ok { + matchFound = true + break + } + } + if !matchFound { + return fmt.Errorf("provided artifact digests do not match digests in statement") + } + } + + return nil +} + +func verifyEnvelopeWithArtifactDigests(verifier signature.Verifier, envelope EnvelopeContent, digests []ArtifactDigest) error { + if err := verifyEnvelope(verifier, envelope); err != nil { + return err + } + statement, err := envelope.Statement() + if err != nil { + return fmt.Errorf("could not verify artifact: unable to extract statement from envelope: %w", err) + } + if err = limitSubjects(statement); err != nil { + return err + } + + // create a map based on the digests present in the statement + // the 
map key is the hash algorithm and the field is a slice of digests + // created using that hash algorithm + subjectDigests := make(map[string][][]byte) + for _, subject := range statement.Subject { + for alg, digest := range subject.Digest { + if _, ok := subjectDigests[alg]; !ok { + subjectDigests[alg] = make([][]byte, 0) + } + hexdigest, err := hex.DecodeString(digest) + if err != nil { + return fmt.Errorf("could not verify artifact: unable to decode subject digest: %w", err) + } + subjectDigests[alg] = append(subjectDigests[alg], hexdigest) + } + } + + // now loop over the provided artifact digests and compare them to the mapped subject digests + // if we cannot find a match, exit with an error + for _, artifactDigest := range digests { + statementDigests, ok := subjectDigests[artifactDigest.Algorithm] + if !ok { + return fmt.Errorf("provided artifact digests does not match digests in statement") + } + if ok := isDigestInSlice(artifactDigest.Digest, statementDigests); !ok { + return fmt.Errorf("provided artifact digest does not match any digest in statement") + } + } + + return nil +} + +func isDigestInSlice(digest []byte, digestSlice [][]byte) bool { + for _, el := range digestSlice { + if bytes.Equal(digest, el) { + return true + } + } + return false +} + +func verifyMessageSignature(verifier signature.Verifier, msg MessageSignatureContent, artifact io.Reader) error { + err := verifier.VerifySignature(bytes.NewReader(msg.Signature()), artifact) + if err != nil { + return fmt.Errorf("could not verify message: %w", err) + } + + return nil +} + +func verifyMessageSignatureWithArtifactDigest(verifier signature.Verifier, msg MessageSignatureContent, artifactDigest []byte) error { + if !bytes.Equal(artifactDigest, msg.Digest()) { + return errors.New("artifact does not match digest") + } + if _, ok := verifier.(*signature.ED25519Verifier); ok { + return errors.New("message signatures with ed25519 signatures can only be verified with artifacts, and not just their digest") + } + err := verifier.VerifySignature(bytes.NewReader(msg.Signature()), bytes.NewReader([]byte{}), options.WithDigest(artifactDigest)) + + if err != nil { + return fmt.Errorf("could not verify message: %w", err) + } + + return nil +} + +// limitSubjects limits the number of subjects and digests in a statement to prevent DoS. 
+func limitSubjects(statement *in_toto.Statement) error { + if len(statement.Subject) > maxAllowedSubjects { + return fmt.Errorf("too many subjects: %d > %d", len(statement.Subject), maxAllowedSubjects) + } + for _, subject := range statement.Subject { + // limit the number of digests too + if len(subject.Digest) > maxAllowedSubjectDigests { + return fmt.Errorf("too many digests: %d > %d", len(subject.Digest), maxAllowedSubjectDigests) + } + } + return nil +} + +type multihasher struct { + io.Writer + hashfuncs []crypto.Hash + hashes []io.Writer +} + +func newMultihasher(hashfuncs []crypto.Hash) (*multihasher, error) { + if len(hashfuncs) == 0 { + return nil, errors.New("no hash functions specified") + } + hashes := make([]io.Writer, len(hashfuncs)) + for i := range hashfuncs { + hashes[i] = hashfuncs[i].New() + } + return &multihasher{ + Writer: io.MultiWriter(hashes...), + hashfuncs: hashfuncs, + hashes: hashes, + }, nil +} + +func (m *multihasher) Sum(b []byte) map[crypto.Hash][]byte { + sums := make(map[crypto.Hash][]byte, len(m.hashes)) + for i := range m.hashes { + sums[m.hashfuncs[i]] = m.hashes[i].(hash.Hash).Sum(b) + } + return sums +} + +func algStringToHashFunc(alg string) (crypto.Hash, error) { + switch alg { + case "sha256": + return crypto.SHA256, nil + case "sha384": + return crypto.SHA384, nil + case "sha512": + return crypto.SHA512, nil + default: + return 0, errors.New("unsupported digest algorithm") + } +} + +// getHashFunctions returns the smallest subset of supported hash functions +// that are needed to verify all subjects in a statement. +func getHashFunctions(statement *in_toto.Statement) ([]crypto.Hash, error) { + if len(statement.Subject) == 0 { + return nil, errors.New("no subjects found in statement") + } + + supportedHashFuncs := []crypto.Hash{crypto.SHA512, crypto.SHA384, crypto.SHA256} + chosenHashFuncs := make([]crypto.Hash, 0, len(supportedHashFuncs)) + subjectHashFuncs := make([][]crypto.Hash, len(statement.Subject)) + + // go through the statement and make a simple data structure to hold the + // list of hash funcs for each subject (subjectHashFuncs) + for i, subject := range statement.Subject { + for alg := range subject.Digest { + hf, err := algStringToHashFunc(alg) + if err != nil { + continue + } + subjectHashFuncs[i] = append(subjectHashFuncs[i], hf) + } + } + + // for each subject, see if we have chosen a compatible hash func, and if + // not, add the first one that is supported + for _, hfs := range subjectHashFuncs { + // if any of the hash funcs are already in chosenHashFuncs, skip + if len(intersection(hfs, chosenHashFuncs)) > 0 { + continue + } + + // check each supported hash func and add it if the subject + // has a digest for it + for _, hf := range supportedHashFuncs { + if slices.Contains(hfs, hf) { + chosenHashFuncs = append(chosenHashFuncs, hf) + break + } + } + } + + if len(chosenHashFuncs) == 0 { + return nil, errors.New("no supported digest algorithms found") + } + + return chosenHashFuncs, nil +} + +func intersection(a, b []crypto.Hash) []crypto.Hash { + var result []crypto.Hash + for _, x := range a { + if slices.Contains(b, x) { + result = append(result, x) + } + } + return result +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go new file mode 100644 index 00000000000..5751ec852cb --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go @@ -0,0 +1,882 @@ +// Copyright 2023 The Sigstore Authors. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+	"crypto/x509"
+	"encoding/asn1"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"time"
+
+	in_toto "github.com/in-toto/attestation/go/v1"
+	"github.com/sigstore/sigstore-go/pkg/fulcio/certificate"
+	"github.com/sigstore/sigstore-go/pkg/root"
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+	"google.golang.org/protobuf/encoding/protojson"
+)
+
+const (
+	VerificationResultMediaType01 = "application/vnd.dev.sigstore.verificationresult+json;version=0.1"
+)
+
+type Verifier struct {
+	trustedMaterial root.TrustedMaterial
+	config          VerifierConfig
+}
+
+type VerifierConfig struct { // nolint: revive
+	// requireSignedTimestamps requires RFC3161 timestamps to verify
+	// short-lived certificates
+	requireSignedTimestamps bool
+	// signedTimestampThreshold is the minimum number of verified
+	// RFC3161 timestamps in a bundle
+	signedTimestampThreshold int
+	// requireIntegratedTimestamps requires log entry integrated timestamps to
+	// verify short-lived certificates
+	requireIntegratedTimestamps bool
+	// integratedTimeThreshold is the minimum number of log entry
+	// integrated timestamps in a bundle
+	integratedTimeThreshold int
+	// requireObserverTimestamps requires RFC3161 timestamps and/or log
+	// integrated timestamps to verify short-lived certificates
+	requireObserverTimestamps bool
+	// observerTimestampThreshold is the minimum number of verified
+	// RFC3161 timestamps and/or log integrated timestamps in a bundle
+	observerTimestampThreshold int
+	// requireTlogEntries requires log inclusion proofs in a bundle
+	requireTlogEntries bool
+	// tlogEntriesThreshold is the minimum number of verified inclusion
+	// proofs in a bundle
+	tlogEntriesThreshold int
+	// requireSCTs requires SCTs in Fulcio certificates
+	requireSCTs bool
+	// ctlogEntriesThreshold is the minimum number of verified SCTs in
+	// a Fulcio certificate
+	ctlogEntriesThreshold int
+	// useCurrentTime uses the current time rather than a provided signed
+	// or log timestamp. Most workflows will not use this option
+	useCurrentTime bool
+	// allowNoTimestamp can be used to skip timestamp checks when a key
+	// is used rather than a certificate.
+	allowNoTimestamp bool
+}
+
+type VerifierOption func(*VerifierConfig) error
+
+// NewVerifier creates a new Verifier. It takes a
+// root.TrustedMaterial, which contains a set of trusted public keys and
+// certificates, and a set of VerifierOptions, which set the config
+// that determines the behaviour of the Verify function.
+//
+// VerifierConfig's set of options should match the properties of a given
+// Sigstore deployment, i.e. whether to expect SCTs, Tlog entries, or signed
+// timestamps.
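A hedged sketch of wiring up NewVerifier (defined just below) for a deployment that issues transparency-log entries and observer timestamps; the thresholds and the source of trusted material are illustrative:

package example

import (
	"github.com/sigstore/sigstore-go/pkg/root"
	"github.com/sigstore/sigstore-go/pkg/verify"
)

// newPublicGoodVerifier is a hypothetical helper; tm would typically be the
// TUF-distributed trusted root. Thresholds of 1 are illustrative.
func newPublicGoodVerifier(tm root.TrustedMaterial) (*verify.Verifier, error) {
	return verify.NewVerifier(tm,
		verify.WithTransparencyLog(1),    // require one verified tlog entry
		verify.WithObserverTimestamps(1), // and one observer timestamp
	)
}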
+func NewVerifier(trustedMaterial root.TrustedMaterial, options ...VerifierOption) (*Verifier, error) { + var err error + c := VerifierConfig{} + + for _, opt := range options { + err = opt(&c) + if err != nil { + return nil, fmt.Errorf("failed to configure verifier: %w", err) + } + } + + err = c.Validate() + if err != nil { + return nil, err + } + + v := &Verifier{ + trustedMaterial: trustedMaterial, + config: c, + } + + return v, nil +} + +// TODO: Remove the following deprecated functions in a future release before sigstore-go 2.0. + +// Deprecated: Use Verifier instead +type SignedEntityVerifier = Verifier + +// Deprecated: Use NewVerifier instead +func NewSignedEntityVerifier(trustedMaterial root.TrustedMaterial, options ...VerifierOption) (*Verifier, error) { + return NewVerifier(trustedMaterial, options...) +} + +// WithSignedTimestamps configures the Verifier to expect RFC 3161 +// timestamps from a Timestamp Authority, verify them using the TrustedMaterial's +// TimestampingAuthorities(), and, if it exists, use the resulting timestamp(s) +// to verify the Fulcio certificate. +func WithSignedTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("signed timestamp threshold must be at least 1") + } + c.requireSignedTimestamps = true + c.signedTimestampThreshold = threshold + return nil + } +} + +// WithObserverTimestamps configures the Verifier to expect +// timestamps from either an RFC3161 timestamp authority or a log's +// SignedEntryTimestamp. These are verified using the TrustedMaterial's +// TimestampingAuthorities() or RekorLogs(), and used to verify +// the Fulcio certificate. +func WithObserverTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("observer timestamp threshold must be at least 1") + } + c.requireObserverTimestamps = true + c.observerTimestampThreshold = threshold + return nil + } +} + +// WithTransparencyLog configures the Verifier to expect +// Transparency Log inclusion proofs or SignedEntryTimestamps, verifying them +// using the TrustedMaterial's RekorLogs(). +func WithTransparencyLog(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("transparency log entry threshold must be at least 1") + } + c.requireTlogEntries = true + c.tlogEntriesThreshold = threshold + return nil + } +} + +// WithIntegratedTimestamps configures the Verifier to +// expect log entry integrated timestamps from either SignedEntryTimestamps +// or live log lookups. +func WithIntegratedTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + c.requireIntegratedTimestamps = true + c.integratedTimeThreshold = threshold + return nil + } +} + +// WithSignedCertificateTimestamps configures the Verifier to +// expect the Fulcio certificate to have a SignedCertificateTimestamp, and +// verify it using the TrustedMaterial's CTLogAuthorities(). +func WithSignedCertificateTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("ctlog entry threshold must be at least 1") + } + c.requireSCTs = true + c.ctlogEntriesThreshold = threshold + return nil + } +} + +// WithCurrentTime configures the Verifier to not expect +// any timestamps from either a Timestamp Authority or a Transparency Log. 
+// This option should not be enabled when verifying short-lived certificates, +// as an observer timestamp is needed. This option is useful primarily for +// private deployments with long-lived code signing certificates. +func WithCurrentTime() VerifierOption { + return func(c *VerifierConfig) error { + c.useCurrentTime = true + return nil + } +} + +// WithNoObserverTimestamps configures the Verifier to not expect +// any timestamps from either a Timestamp Authority or a Transparency Log +// and to not use the current time to verify a certificate. This may only +// be used when verifying with keys rather than certificates. +func WithNoObserverTimestamps() VerifierOption { + return func(c *VerifierConfig) error { + c.allowNoTimestamp = true + return nil + } +} + +func (c *VerifierConfig) Validate() error { + if c.allowNoTimestamp && (c.requireObserverTimestamps || c.requireSignedTimestamps || c.requireIntegratedTimestamps || c.useCurrentTime) { + return errors.New("specify WithNoObserverTimestamps() without any other verifier options") + } + if !c.requireObserverTimestamps && !c.requireSignedTimestamps && !c.requireIntegratedTimestamps && !c.useCurrentTime && !c.allowNoTimestamp { + return errors.New("when initializing a new Verifier, you must specify at least one of " + + "WithObserverTimestamps(), WithSignedTimestamps(), WithIntegratedTimestamps() or WithCurrentTime(), " + + "or exclusively specify WithNoObserverTimestamps()") + } + + return nil +} + +type VerificationResult struct { + MediaType string `json:"mediaType"` + Statement *in_toto.Statement `json:"statement,omitempty"` + Signature *SignatureVerificationResult `json:"signature,omitempty"` + VerifiedTimestamps []TimestampVerificationResult `json:"verifiedTimestamps"` + VerifiedIdentity *CertificateIdentity `json:"verifiedIdentity,omitempty"` +} + +type SignatureVerificationResult struct { + PublicKeyID *[]byte `json:"publicKeyId,omitempty"` + Certificate *certificate.Summary `json:"certificate,omitempty"` +} + +type TimestampVerificationResult struct { + Type string `json:"type"` + URI string `json:"uri"` + Timestamp time.Time `json:"timestamp"` +} + +func NewVerificationResult() *VerificationResult { + return &VerificationResult{ + MediaType: VerificationResultMediaType01, + } +} + +// MarshalJSON deals with protojson needed for the Statement. +// Can be removed when https://github.com/in-toto/attestation/pull/403 is merged. +func (b *VerificationResult) MarshalJSON() ([]byte, error) { + statement, err := protojson.Marshal(b.Statement) + if err != nil { + return nil, err + } + // creating a type alias to avoid infinite recursion, as MarshalJSON is + // not copied into the alias. 
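+	// (Without the alias, json.Marshal on the embedded value would call this
+	// MarshalJSON method again and recurse forever; Alias shares the fields
+	// but not the methods, so the inner Marshal uses default struct encoding.)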
+ type Alias VerificationResult + return json.Marshal(struct { + Alias + Statement json.RawMessage `json:"statement,omitempty"` + }{ + Alias: Alias(*b), + Statement: statement, + }) +} + +func (b *VerificationResult) UnmarshalJSON(data []byte) error { + b.Statement = &in_toto.Statement{} + type Alias VerificationResult + aux := &struct { + Alias + Statement json.RawMessage `json:"statement,omitempty"` + }{ + Alias: Alias(*b), + } + if err := json.Unmarshal(data, aux); err != nil { + return err + } + return protojson.Unmarshal(aux.Statement, b.Statement) +} + +type PolicyOption func(*PolicyConfig) error +type ArtifactPolicyOption func(*PolicyConfig) error + +// PolicyBuilder is responsible for building & validating a PolicyConfig +type PolicyBuilder struct { + artifactPolicy ArtifactPolicyOption + policyOptions []PolicyOption +} + +func (pc PolicyBuilder) options() []PolicyOption { + arr := []PolicyOption{PolicyOption(pc.artifactPolicy)} + return append(arr, pc.policyOptions...) +} + +func (pc PolicyBuilder) BuildConfig() (*PolicyConfig, error) { + var err error + + policy := &PolicyConfig{} + for _, applyOption := range pc.options() { + err = applyOption(policy) + if err != nil { + return nil, err + } + } + + if err := policy.validate(); err != nil { + return nil, err + } + + return policy, nil +} + +type ArtifactDigest struct { + Algorithm string + Digest []byte +} + +type PolicyConfig struct { + ignoreArtifact bool + ignoreIdentities bool + requireSigningKey bool + certificateIdentities CertificateIdentities + verifyArtifacts bool + artifacts []io.Reader + verifyArtifactDigests bool + artifactDigests []ArtifactDigest +} + +func (p *PolicyConfig) withVerifyAlreadyConfigured() error { + if p.verifyArtifacts || p.verifyArtifactDigests { + return errors.New("only one invocation of WithArtifact/WithArtifacts/WithArtifactDigest/WithArtifactDigests is allowed") + } + + return nil +} + +func (p *PolicyConfig) validate() error { + if p.RequireIdentities() && len(p.certificateIdentities) == 0 { + return errors.New("can't verify identities without providing at least one identity") + } + + return nil +} + +// RequireArtifact returns true if the Verify algorithm should perform +// signature verification with an an artifact provided by either the +// WithArtifact or the WithArtifactDigest functions. +// +// By default, unless explicitly turned off, we should always expect to verify +// a SignedEntity's signature using an artifact. Bools are initialized to false, +// so this behaviour is therefore controlled by the ignoreArtifact field. +// +// Double negatives are confusing, though. To aid with comprehension of the +// main Verify loop, this function therefore just wraps the double negative. +func (p *PolicyConfig) RequireArtifact() bool { + return !p.ignoreArtifact +} + +// RequireIdentities returns true if the Verify algorithm should check +// whether the SignedEntity's certificate was created by one of the identities +// provided by the WithCertificateIdentity function. +// +// By default, unless explicitly turned off, we should always expect to enforce +// that a SignedEntity's certificate was created by an Identity we trust. Bools +// are initialized to false, so this behaviour is therefore controlled by the +// ignoreIdentities field. +// +// Double negatives are confusing, though. To aid with comprehension of the +// main Verify loop, this function therefore just wraps the double negative. 
+func (p *PolicyConfig) RequireIdentities() bool { + return !p.ignoreIdentities +} + +// RequireSigningKey returns true if we expect the SignedEntity to be signed +// with a key and not a certificate. +func (p *PolicyConfig) RequireSigningKey() bool { + return p.requireSigningKey +} + +func NewPolicy(artifactOpt ArtifactPolicyOption, options ...PolicyOption) PolicyBuilder { + return PolicyBuilder{artifactPolicy: artifactOpt, policyOptions: options} +} + +// WithoutIdentitiesUnsafe allows the caller of Verify to skip enforcing any +// checks on the identity that created the SignedEntity being verified. +// +// Do not use this option unless you know what you are doing! +// +// As the name implies, using WithoutIdentitiesUnsafe is not safe: outside of +// exceptional circumstances, we should always enforce that the SignedEntity +// being verified was signed by a trusted CertificateIdentity. +// +// For more information, consult WithCertificateIdentity. +func WithoutIdentitiesUnsafe() PolicyOption { + return func(p *PolicyConfig) error { + if len(p.certificateIdentities) > 0 { + return errors.New("can't use WithoutIdentitiesUnsafe while specifying CertificateIdentities") + } + + p.ignoreIdentities = true + return nil + } +} + +// WithCertificateIdentity allows the caller of Verify to enforce that the +// SignedEntity being verified was signed by the given identity, as defined by +// the Fulcio certificate embedded in the entity. If this policy is enabled, +// but the SignedEntity does not have a certificate, verification will fail. +// +// Providing this function multiple times will concatenate the provided +// CertificateIdentity to the list of identities being checked. +// +// If all of the provided CertificateIdentities fail to match the Fulcio +// certificate, then verification will fail. If *any* CertificateIdentity +// matches, then verification will succeed. Therefore, each CertificateIdentity +// provided to this function must define a "sufficient" identity to trust. +// +// The CertificateIdentity struct allows callers to specify: +// - The exact value, or Regexp, of the SubjectAlternativeName +// - The exact value of any Fulcio OID X.509 extension, i.e. Issuer +// +// For convenience, consult the NewShortCertificateIdentity function. +func WithCertificateIdentity(identity CertificateIdentity) PolicyOption { + return func(p *PolicyConfig) error { + if p.ignoreIdentities { + return errors.New("can't use WithCertificateIdentity while using WithoutIdentitiesUnsafe") + } + if p.requireSigningKey { + return errors.New("can't use WithCertificateIdentity while using WithKey") + } + + p.certificateIdentities = append(p.certificateIdentities, identity) + return nil + } +} + +// WithKey allows the caller of Verify to require the SignedEntity being +// verified was signed with a key and not a certificate. +func WithKey() PolicyOption { + return func(p *PolicyConfig) error { + if len(p.certificateIdentities) > 0 { + return errors.New("can't use WithKey while using WithCertificateIdentity") + } + + p.requireSigningKey = true + p.ignoreIdentities = true + return nil + } +} + +// WithoutArtifactUnsafe allows the caller of Verify to skip checking whether +// the SignedEntity was created from, or references, an artifact. +// +// WithoutArtifactUnsafe can only be used with SignedEntities that contain a +// DSSE envelope. 
If the SignedEntity has a MessageSignature, providing
+// this policy option will cause verification to always fail, since
+// MessageSignatures can only be verified in the presence of an Artifact or
+// artifact digest. See WithArtifact/WithArtifactDigest for more information.
+//
+// Do not use this function unless you know what you are doing!
+//
+// As the name implies, using WithoutArtifactUnsafe is not safe: outside of
+// exceptional circumstances, SignedEntities should always be verified with
+// an artifact.
+func WithoutArtifactUnsafe() ArtifactPolicyOption {
+	return func(p *PolicyConfig) error {
+		if err := p.withVerifyAlreadyConfigured(); err != nil {
+			return err
+		}
+
+		p.ignoreArtifact = true
+		return nil
+	}
+}
+
+// WithArtifact allows the caller of Verify to enforce that the SignedEntity
+// being verified was created from, or references, a given artifact.
+//
+// If the SignedEntity contains a DSSE envelope, then the artifact digest is
+// calculated from the given artifact, and compared to the digest in the
+// envelope's statement.
+func WithArtifact(artifact io.Reader) ArtifactPolicyOption {
+	return func(p *PolicyConfig) error {
+		if err := p.withVerifyAlreadyConfigured(); err != nil {
+			return err
+		}
+
+		if p.ignoreArtifact {
+			return errors.New("can't use WithArtifact while using WithoutArtifactUnsafe")
+		}
+
+		p.verifyArtifacts = true
+		p.artifacts = []io.Reader{artifact}
+		return nil
+	}
+}
+
+// WithArtifacts allows the caller of Verify to enforce that the SignedEntity
+// being verified was created from, or references, a slice of artifacts.
+//
+// If the SignedEntity contains a DSSE envelope, then the artifact digest is
+// calculated from the given artifact, and compared to the digest in the
+// envelope's statement.
+func WithArtifacts(artifacts []io.Reader) ArtifactPolicyOption {
+	return func(p *PolicyConfig) error {
+		if err := p.withVerifyAlreadyConfigured(); err != nil {
+			return err
+		}
+
+		if p.ignoreArtifact {
+			return errors.New("can't use WithArtifacts while using WithoutArtifactUnsafe")
+		}
+
+		p.verifyArtifacts = true
+		p.artifacts = artifacts
+		return nil
+	}
+}
+
+// WithArtifactDigest allows the caller of Verify to enforce that the
+// SignedEntity being verified was created for a given artifact digest.
+//
+// If the SignedEntity contains a MessageSignature that was signed using the
+// ED25519 algorithm, then providing only an artifactDigest will fail; the
+// whole artifact must be provided. Use WithArtifact instead.
+//
+// If the SignedEntity contains a DSSE envelope, then the artifact digest is
+// compared to the digest in the envelope's statement.
+func WithArtifactDigest(algorithm string, artifactDigest []byte) ArtifactPolicyOption {
+	return func(p *PolicyConfig) error {
+		if err := p.withVerifyAlreadyConfigured(); err != nil {
+			return err
+		}
+
+		if p.ignoreArtifact {
+			return errors.New("can't use WithArtifactDigest while using WithoutArtifactUnsafe")
+		}
+
+		p.verifyArtifactDigests = true
+		p.artifactDigests = []ArtifactDigest{{
+			Algorithm: algorithm,
+			Digest:    artifactDigest,
+		}}
+		return nil
+	}
+}
+
+// WithArtifactDigests allows the caller of Verify to enforce that the
+// SignedEntity being verified was created for a given array of artifact digests.
+//
+// If the SignedEntity contains a DSSE envelope, then the artifact digests
+// are compared to the digests in the envelope's statement.
+//
+// If the SignedEntity does not contain a DSSE envelope, verification fails.
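+//
+// A hedged usage sketch (the digest shown is sha256("hello world"), included
+// purely for illustration; identity, entity, and verifier setup are elided,
+// and hex.DecodeString assumes an encoding/hex import):
+//
+//	raw, _ := hex.DecodeString("b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9")
+//	policy := verify.NewPolicy(
+//		verify.WithArtifactDigests([]verify.ArtifactDigest{{Algorithm: "sha256", Digest: raw}}),
+//		verify.WithCertificateIdentity(identity))
+//	result, err := v.Verify(entity, policy)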
+func WithArtifactDigests(digests []ArtifactDigest) ArtifactPolicyOption {
+	return func(p *PolicyConfig) error {
+		if err := p.withVerifyAlreadyConfigured(); err != nil {
+			return err
+		}
+
+		if p.ignoreArtifact {
+			return errors.New("can't use WithArtifactDigests while using WithoutArtifactUnsafe")
+		}
+
+		p.verifyArtifactDigests = true
+		p.artifactDigests = digests
+		return nil
+	}
+}
+
+// Verify checks the cryptographic integrity of a given SignedEntity according
+// to the options configured in the NewVerifier. Its purpose is to
+// determine whether the SignedEntity was created by a Sigstore deployment we
+// trust, as defined by keys in our TrustedMaterial.
+//
+// If the SignedEntity contains a MessageSignature, then the artifact or its
+// digest must be provided to the Verify function, as it is required to verify
+// the signature. See WithArtifact and WithArtifactDigest for more details.
+//
+// If and only if verification is successful, Verify will return a
+// VerificationResult struct whose contents' integrity have been verified.
+// Verify may then verify the contents of the VerificationResults using supplied
+// PolicyOptions. See WithCertificateIdentity for more details.
+//
+// Callers of this function SHOULD ALWAYS:
+// - (if the signed entity has a certificate) verify that its Subject Alternative
+//   Name matches a trusted identity, and that its OID Issuer field matches an
+//   expected value
+// - (if the signed entity has a dsse envelope) verify that the envelope's
+//   statement's subject matches the artifact being verified
+func (v *Verifier) Verify(entity SignedEntity, pb PolicyBuilder) (*VerificationResult, error) {
+	policy, err := pb.BuildConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed to build policy: %w", err)
+	}
+
+	// Let's go by the spec: https://docs.google.com/document/d/1kbhK2qyPPk8SLavHzYSDM8-Ueul9_oxIMVFuWMWKz0E/edit#heading=h.g11ovq2s1jxh
+	// > ## Transparency Log Entry
+	verifiedTlogTimestamps, err := v.VerifyTransparencyLogInclusion(entity)
+	if err != nil {
+		return nil, fmt.Errorf("failed to verify log inclusion: %w", err)
+	}
+
+	// > ## Establishing a Time for the Signature
+	// > First, establish a time for the signature. This timestamp is required to validate the certificate chain, so this step comes first.
+ verifiedTimestamps, err := v.VerifyObserverTimestamps(entity, verifiedTlogTimestamps) + if err != nil { + return nil, fmt.Errorf("failed to verify timestamps: %w", err) + } + + verificationContent, err := entity.VerificationContent() + if err != nil { + return nil, fmt.Errorf("failed to fetch verification content: %w", err) + } + + var signedWithCertificate bool + var certSummary certificate.Summary + + // If the bundle was signed with a long-lived key, and does not have a Fulcio certificate, + // then skip the certificate verification steps + if leafCert := verificationContent.Certificate(); leafCert != nil { + if policy.RequireSigningKey() { + return nil, errors.New("expected key signature, not certificate") + } + if v.config.allowNoTimestamp { + return nil, errors.New("must provide timestamp to verify certificate") + } + + signedWithCertificate = true + + // Get the summary before modifying the cert extensions + certSummary, err = certificate.SummarizeCertificate(leafCert) + if err != nil { + return nil, fmt.Errorf("failed to summarize certificate: %w", err) + } + + // From spec: + // > ## Certificate + // > … + // > The Verifier MUST perform certification path validation (RFC 5280 §6) of the certificate chain with the pre-distributed Fulcio root certificate(s) as a trust anchor, but with a fake “current time.” If a timestamp from the timestamping service is available, the Verifier MUST perform path validation using the timestamp from the Timestamping Service. If a timestamp from the Transparency Service is available, the Verifier MUST perform path validation using the timestamp from the Transparency Service. If both are available, the Verifier performs path validation twice. If either fails, verification fails. + + // Go does not support the OtherName GeneralName SAN extension. If + // Fulcio issued the certificate with an OtherName SAN, it will be + // handled by SummarizeCertificate above, and it must be removed here + // or the X.509 verification will fail. + if len(leafCert.UnhandledCriticalExtensions) > 0 { + var unhandledExts []asn1.ObjectIdentifier + for _, oid := range leafCert.UnhandledCriticalExtensions { + if !oid.Equal(cryptoutils.SANOID) { + unhandledExts = append(unhandledExts, oid) + } + } + leafCert.UnhandledCriticalExtensions = unhandledExts + } + + var chains [][]*x509.Certificate + for _, verifiedTs := range verifiedTimestamps { + // verify the leaf certificate against the root + chains, err = VerifyLeafCertificate(verifiedTs.Timestamp, leafCert, v.trustedMaterial) + if err != nil { + return nil, fmt.Errorf("failed to verify leaf certificate: %w", err) + } + } + + // From spec: + // > Unless performing online verification (see §Alternative Workflows), the Verifier MUST extract the SignedCertificateTimestamp embedded in the leaf certificate, and verify it as in RFC 9162 §8.1.3, using the verification key from the Certificate Transparency Log. 
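+	//
+	// requireSCTs below is set by WithSignedCertificateTimestamps; chains
+	// holds the certification paths built by VerifyLeafCertificate above.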
+ + if v.config.requireSCTs { + err = VerifySignedCertificateTimestamp(chains, v.config.ctlogEntriesThreshold, v.trustedMaterial) + if err != nil { + return nil, fmt.Errorf("failed to verify signed certificate timestamp: %w", err) + } + } + } + + // If SCTs are required, ensure the bundle is certificate-signed not public key-signed + if v.config.requireSCTs { + if verificationContent.PublicKey() != nil { + return nil, errors.New("SCTs required but bundle is signed with a public key, which cannot contain SCTs") + } + } + + // From spec: + // > ## Signature Verification + // > The Verifier MUST verify the provided signature for the constructed payload against the key in the leaf of the certificate chain. + + sigContent, err := entity.SignatureContent() + if err != nil { + return nil, fmt.Errorf("failed to fetch signature content: %w", err) + } + + entityVersion, err := entity.Version() + if err != nil { + return nil, fmt.Errorf("failed to fetch entity version: %w", err) + } + + var enableCompat bool + switch entityVersion { + case "v0.1": + fallthrough + case "0.1": + fallthrough + case "v0.2": + fallthrough + case "0.2": + fallthrough + case "v0.3": + fallthrough + case "0.3": + enableCompat = true + } + verifier, err := getSignatureVerifier(sigContent, verificationContent, v.trustedMaterial, enableCompat) + if err != nil { + return nil, fmt.Errorf("failed to get signature verifier: %w", err) + } + + if policy.RequireArtifact() { + switch { + case policy.verifyArtifacts: + err = verifySignatureWithVerifierAndArtifacts(verifier, sigContent, verificationContent, v.trustedMaterial, policy.artifacts) + case policy.verifyArtifactDigests: + err = verifySignatureWithVerifierAndArtifactDigests(verifier, sigContent, verificationContent, v.trustedMaterial, policy.artifactDigests) + default: + // should never happen, but just in case: + err = errors.New("no artifact or artifact digest provided") + } + } else { + // verifying with artifact has been explicitly turned off, so just check + // the signature on the dsse envelope: + err = verifySignatureWithVerifier(verifier, sigContent, verificationContent, v.trustedMaterial) + } + + if err != nil { + return nil, fmt.Errorf("failed to verify signature: %w", err) + } + + // Hooray! We've verified all of the entity's constituent parts! 🎉 🥳 + // Now we can construct the results object accordingly. + result := NewVerificationResult() + if signedWithCertificate { + result.Signature = &SignatureVerificationResult{ + Certificate: &certSummary, + } + } else { + pubKeyID := []byte(verificationContent.PublicKey().Hint()) + result.Signature = &SignatureVerificationResult{ + PublicKeyID: &pubKeyID, + } + } + + // SignatureContent can be either an Envelope or a MessageSignature. + // If it's an Envelope, let's pop the Statement for our results: + if envelope := sigContent.EnvelopeContent(); envelope != nil { + stmt, err := envelope.Statement() + if err != nil { + return nil, fmt.Errorf("failed to fetch envelope statement: %w", err) + } + + result.Statement = stmt + } + + result.VerifiedTimestamps = verifiedTimestamps + + // Now that the signed entity's crypto material has been verified, and the + // result struct has been constructed, we can optionally enforce some + // additional policies: + // -------------------- + + // From ## Certificate section, + // >The Verifier MUST then check the certificate against the verification policy. 
Details on how to do this depend on the verification policy, but the Verifier SHOULD check the Issuer X.509 extension (OID 1.3.6.1.4.1.57264.1.1) at a minimum, and will in most cases check the SubjectAlternativeName as well. See Spec: Fulcio §TODO for example checks on the certificate.
+	if policy.RequireIdentities() {
+		if !signedWithCertificate {
+			// We got asked to verify identities, but the entity was not signed with
+			// a certificate. That's a problem!
+			return nil, errors.New("can't verify certificate identities: entity was not signed with a certificate")
+		}
+
+		if len(policy.certificateIdentities) == 0 {
+			return nil, errors.New("can't verify certificate identities: no identities provided")
+		}
+
+		matchingCertID, err := policy.certificateIdentities.Verify(certSummary)
+		if err != nil {
+			return nil, fmt.Errorf("failed to verify certificate identity: %w", err)
+		}
+
+		result.VerifiedIdentity = matchingCertID
+	}
+
+	return result, nil
+}
+
+// VerifyTransparencyLogInclusion verifies TlogEntries if expected. Optionally returns
+// a list of verified timestamps from the log integrated timestamps when verifying
+// with observer timestamps.
+// TODO: Return a different verification result for logs specifically (also for #48)
+func (v *Verifier) VerifyTransparencyLogInclusion(entity SignedEntity) ([]TimestampVerificationResult, error) {
+	verifiedTimestamps := []TimestampVerificationResult{}
+
+	if v.config.requireTlogEntries {
+		// log timestamps should be verified if WithIntegratedTimestamps or WithObserverTimestamps is used
+		verifiedTlogTimestamps, err := VerifyTlogEntry(entity, v.trustedMaterial, v.config.tlogEntriesThreshold,
+			v.config.requireIntegratedTimestamps || v.config.requireObserverTimestamps)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, vts := range verifiedTlogTimestamps {
+			verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "Tlog", URI: vts.URI, Timestamp: vts.Time})
+		}
+	}
+
+	return verifiedTimestamps, nil
+}
+
+// VerifyObserverTimestamps verifies RFC3161 signed timestamps, and verifies
+// that timestamp thresholds are met with log entry integrated timestamps,
+// signed timestamps, or a combination of both. The returned timestamps
+// can be used to verify short-lived certificates.
+// logTimestamps may be populated with verified log entry integrated timestamps.
+// In order to be verifiable, a SignedEntity must have at least one verified
+// "observer timestamp".
+func (v *Verifier) VerifyObserverTimestamps(entity SignedEntity, logTimestamps []TimestampVerificationResult) ([]TimestampVerificationResult, error) {
+	verifiedTimestamps := []TimestampVerificationResult{}
+
+	// From spec:
+	// > … if verification or timestamp parsing fails, the Verifier MUST abort
+	if v.config.requireSignedTimestamps {
+		verifiedSignedTimestamps, err := VerifySignedTimestampWithThreshold(entity, v.trustedMaterial, v.config.signedTimestampThreshold)
+		if err != nil {
+			return nil, err
+		}
+		for _, vts := range verifiedSignedTimestamps {
+			verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "TimestampAuthority", URI: vts.URI, Timestamp: vts.Time})
+		}
+	}
+
+	if v.config.requireIntegratedTimestamps {
+		if len(logTimestamps) < v.config.integratedTimeThreshold {
+			return nil, fmt.Errorf("threshold not met for verified log entry integrated timestamps: %d < %d", len(logTimestamps), v.config.integratedTimeThreshold)
+		}
+		verifiedTimestamps = append(verifiedTimestamps, logTimestamps...)
+ } + + if v.config.requireObserverTimestamps { + verifiedSignedTimestamps, verificationErrors, err := VerifySignedTimestamp(entity, v.trustedMaterial) + if err != nil { + return nil, fmt.Errorf("failed to verify signed timestamps: %w", err) + } + + // check threshold for both RFC3161 and log timestamps + tsCount := len(verifiedSignedTimestamps) + len(logTimestamps) + if tsCount < v.config.observerTimestampThreshold { + return nil, fmt.Errorf("threshold not met for verified signed & log entry integrated timestamps: %d < %d; error: %w", + tsCount, v.config.observerTimestampThreshold, errors.Join(verificationErrors...)) + } + + // append all timestamps + verifiedTimestamps = append(verifiedTimestamps, logTimestamps...) + for _, vts := range verifiedSignedTimestamps { + verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "TimestampAuthority", URI: vts.URI, Timestamp: vts.Time}) + } + } + + if v.config.useCurrentTime { + // use current time to verify certificate if no signed timestamps are provided + verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "CurrentTime", URI: "", Timestamp: time.Now()}) + } + + if len(verifiedTimestamps) == 0 && !v.config.allowNoTimestamp { + return nil, fmt.Errorf("no valid observer timestamps found") + } + + return verifiedTimestamps, nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go new file mode 100644 index 00000000000..eff84be07e1 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go @@ -0,0 +1,187 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "bytes" + "crypto" + "encoding/hex" + "errors" + "fmt" + "net/url" + "regexp" + "strings" + + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/tlog" + "github.com/sigstore/sigstore/pkg/signature" +) + +const maxAllowedTlogEntries = 32 + +// VerifyTlogEntry verifies that the given entity has been logged +// in the transparency log and that the log entry is valid. +// +// The threshold parameter is the number of unique transparency log entries +// that must be verified. 
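+//
+// A sketch of standalone use, under assumed inputs (the entity and trusted
+// material come from elsewhere; whether to trust integrated time is a caller
+// decision):
+//
+//	timestamps, err := verify.VerifyTlogEntry(entity, trustedMaterial, 1, true)
+//	if err != nil {
+//		// log inclusion could not be verified
+//	}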
+func VerifyTlogEntry(entity SignedEntity, trustedMaterial root.TrustedMaterial, logThreshold int, trustIntegratedTime bool) ([]root.Timestamp, error) { //nolint:revive + entries, err := entity.TlogEntries() + if err != nil { + return nil, err + } + + // limit the number of tlog entries to prevent DoS + if len(entries) > maxAllowedTlogEntries { + return nil, fmt.Errorf("too many tlog entries: %d > %d", len(entries), maxAllowedTlogEntries) + } + + // disallow duplicate entries, as a malicious actor could use duplicates to bypass the threshold + for i := 0; i < len(entries); i++ { + for j := i + 1; j < len(entries); j++ { + if entries[i].LogKeyID() == entries[j].LogKeyID() && entries[i].LogIndex() == entries[j].LogIndex() { + return nil, errors.New("duplicate tlog entries found") + } + } + } + + sigContent, err := entity.SignatureContent() + if err != nil { + return nil, err + } + + entitySignature := sigContent.Signature() + + verificationContent, err := entity.VerificationContent() + if err != nil { + return nil, err + } + + verifiedTimestamps := []root.Timestamp{} + logEntriesVerified := 0 + + for _, entry := range entries { + err := tlog.ValidateEntry(entry) + if err != nil { + return nil, err + } + + rekorLogs := trustedMaterial.RekorLogs() + keyID := entry.LogKeyID() + hex64Key := hex.EncodeToString([]byte(keyID)) + tlogVerifier, ok := trustedMaterial.RekorLogs()[hex64Key] + if !ok { + // skip entries the trust root cannot verify + continue + } + + if !entry.HasInclusionPromise() && !entry.HasInclusionProof() { + return nil, fmt.Errorf("entry must contain an inclusion proof and/or promise") + } + if entry.HasInclusionPromise() { + err = tlog.VerifySET(entry, rekorLogs) + if err != nil { + // skip entries the trust root cannot verify + continue + } + if trustIntegratedTime { + verifiedTimestamps = append(verifiedTimestamps, root.Timestamp{Time: entry.IntegratedTime(), URI: tlogVerifier.BaseURL}) + } + } + if entry.HasInclusionProof() { + verifier, err := getVerifier(tlogVerifier.PublicKey, tlogVerifier.SignatureHashFunc) + if err != nil { + return nil, err + } + + if hasRekorV1STH(entry) { + err = tlog.VerifyInclusion(entry, *verifier) + } else { + if tlogVerifier.BaseURL == "" { + return nil, fmt.Errorf("cannot verify Rekor v2 entry without baseUrl in transparency log's trusted root") + } + u, err := url.Parse(tlogVerifier.BaseURL) + if err != nil { + return nil, err + } + err = tlog.VerifyCheckpointAndInclusion(entry, *verifier, u.Hostname()) + if err != nil { + return nil, err + } + } + if err != nil { + return nil, err + } + // DO NOT use timestamp with only an inclusion proof, because it is not signed metadata + } + + // Ensure entry signature matches signature from bundle + if !bytes.Equal(entry.Signature(), entitySignature) { + return nil, errors.New("transparency log signature does not match") + } + + // Ensure entry certificate matches bundle certificate + if !verificationContent.CompareKey(entry.PublicKey(), trustedMaterial) { + return nil, errors.New("transparency log certificate does not match") + } + + // TODO: if you have access to artifact, check that it matches body subject + + // Check tlog entry time against bundle certificates + if !entry.IntegratedTime().IsZero() { + if !verificationContent.ValidAtTime(entry.IntegratedTime(), trustedMaterial) { + return nil, errors.New("integrated time outside certificate validity") + } + } + + // successful log entry verification + logEntriesVerified++ + } + + if logEntriesVerified < logThreshold { + return nil, fmt.Errorf("not 
enough verified log entries from transparency log: %d < %d", logEntriesVerified, logThreshold) + } + + return verifiedTimestamps, nil +} + +func getVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (*signature.Verifier, error) { + verifier, err := signature.LoadVerifier(publicKey, hashFunc) + if err != nil { + return nil, err + } + + return &verifier, nil +} + +// TODO: remove this deprecated function before 2.0 + +// Deprecated: use VerifyTlogEntry instead +func VerifyArtifactTransparencyLog(entity SignedEntity, trustedMaterial root.TrustedMaterial, logThreshold int, trustIntegratedTime bool) ([]root.Timestamp, error) { //nolint:revive + return VerifyTlogEntry(entity, trustedMaterial, logThreshold, trustIntegratedTime) +} + +var treeIDSuffixRegex = regexp.MustCompile(".* - [0-9]+$") + +// hasRekorV1STH checks if the checkpoint has a Rekor v1-style Signed Tree Head +// which contains a numeric Tree ID as part of its checkpoint origin. +func hasRekorV1STH(entry *tlog.Entry) bool { + tle := entry.TransparencyLogEntry() + checkpointBody := tle.GetInclusionProof().GetCheckpoint().GetEnvelope() + checkpointLines := strings.Split(checkpointBody, "\n") + if len(checkpointLines) < 4 { + return false + } + return treeIDSuffixRegex.MatchString(checkpointLines[0]) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go new file mode 100644 index 00000000000..1c10445dd98 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go @@ -0,0 +1,120 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "errors" + "fmt" + + "github.com/sigstore/sigstore-go/pkg/root" +) + +const maxAllowedTimestamps = 32 + +// VerifySignedTimestamp verifies that the given entity has been timestamped +// by a trusted timestamp authority and that the timestamp is valid. 
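+//
+// It returns the verified timestamps alongside any per-timestamp verification
+// errors; a sketch of caller-side threshold checking (a threshold of 1 is
+// assumed here):
+//
+//	ts, tsErrs, err := verify.VerifySignedTimestamp(entity, trustedMaterial)
+//	if err == nil && len(ts) < 1 {
+//		err = errors.Join(tsErrs...)
+//	}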
+func VerifySignedTimestamp(entity SignedEntity, trustedMaterial root.TrustedMaterial) ([]*root.Timestamp, []error, error) { //nolint:revive
+	signedTimestamps, err := entity.Timestamps()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// limit the number of timestamps to prevent DoS
+	if len(signedTimestamps) > maxAllowedTimestamps {
+		return nil, nil, fmt.Errorf("too many signed timestamps: %d > %d", len(signedTimestamps), maxAllowedTimestamps)
+	}
+	sigContent, err := entity.SignatureContent()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	signatureBytes := sigContent.Signature()
+
+	verifiedTimestamps := []*root.Timestamp{}
+	var verificationErrors []error
+	for _, timestamp := range signedTimestamps {
+		verifiedSignedTimestamp, err := verifySignedTimestamp(timestamp, signatureBytes, trustedMaterial)
+		if err != nil {
+			verificationErrors = append(verificationErrors, err)
+			continue
+		}
+		if isDuplicateTSA(verifiedTimestamps, verifiedSignedTimestamp) {
+			verificationErrors = append(verificationErrors, fmt.Errorf("duplicate timestamps from the same authority, ignoring %s", verifiedSignedTimestamp.URI))
+			continue
+		}
+
+		verifiedTimestamps = append(verifiedTimestamps, verifiedSignedTimestamp)
+	}
+
+	return verifiedTimestamps, verificationErrors, nil
+}
+
+// isDuplicateTSA checks if the given verified signed timestamp is a duplicate
+// of any of the verified timestamps.
+// This is used to prevent replay attacks and ensure a single compromised TSA
+// cannot meet the threshold.
+func isDuplicateTSA(verifiedTimestamps []*root.Timestamp, verifiedSignedTimestamp *root.Timestamp) bool {
+	for _, ts := range verifiedTimestamps {
+		if ts.URI == verifiedSignedTimestamp.URI {
+			return true
+		}
+	}
+	return false
+}
+
+// VerifySignedTimestampWithThreshold verifies that the given entity has been
+// timestamped by a trusted timestamp authority and that the timestamp is valid.
+//
+// The threshold parameter is the number of unique timestamps that must be
+// verified.
+func VerifySignedTimestampWithThreshold(entity SignedEntity, trustedMaterial root.TrustedMaterial, threshold int) ([]*root.Timestamp, error) { //nolint:revive
+	verifiedTimestamps, verificationErrors, err := VerifySignedTimestamp(entity, trustedMaterial)
+	if err != nil {
+		return nil, err
+	}
+	if len(verifiedTimestamps) < threshold {
+		return nil, fmt.Errorf("threshold not met for verified signed timestamps: %d < %d; error: %w", len(verifiedTimestamps), threshold, errors.Join(verificationErrors...))
+	}
+	return verifiedTimestamps, nil
+}
+
+func verifySignedTimestamp(signedTimestamp []byte, signatureBytes []byte, trustedMaterial root.TrustedMaterial) (*root.Timestamp, error) {
+	timestampAuthorities := trustedMaterial.TimestampingAuthorities()
+
+	var errs []error
+
+	// Iterate through TSA certificate authorities to find one that verifies
+	for _, tsa := range timestampAuthorities {
+		ts, err := tsa.Verify(signedTimestamp, signatureBytes)
+		if err == nil {
+			return ts, nil
+		}
+		errs = append(errs, err)
+	}
+
+	return nil, fmt.Errorf("unable to verify signed timestamps: %w", errors.Join(errs...))
+}
+
+// TODO: remove below deprecated functions before 2.0
+
+// Deprecated: use VerifySignedTimestamp instead.
+func VerifyTimestampAuthority(entity SignedEntity, trustedMaterial root.TrustedMaterial) ([]*root.Timestamp, []error, error) { //nolint:revive
+	return VerifySignedTimestamp(entity, trustedMaterial)
+}
+
+// Deprecated: use VerifySignedTimestampWithThreshold instead.
+func VerifyTimestampAuthorityWithThreshold(entity SignedEntity, trustedMaterial root.TrustedMaterial, threshold int) ([]*root.Timestamp, error) { //nolint:revive + return VerifySignedTimestampWithThreshold(entity, trustedMaterial, threshold) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go new file mode 100644 index 00000000000..5f801095e3f --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go @@ -0,0 +1,77 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dsse includes wrappers to support DSSE +package dsse + +import ( + "bytes" + "context" + "crypto" + "errors" + + "github.com/sigstore/sigstore/pkg/signature" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// SignerAdapter wraps a `sigstore/signature.Signer`, making it compatible with `go-securesystemslib/dsse.Signer`. +type SignerAdapter struct { + SignatureSigner signature.Signer + Pub crypto.PublicKey + Opts []signature.SignOption + PubKeyID string +} + +// Sign implements `go-securesystemslib/dsse.Signer` +func (a *SignerAdapter) Sign(ctx context.Context, data []byte) ([]byte, error) { + return a.SignatureSigner.SignMessage(bytes.NewReader(data), append(a.Opts, options.WithContext(ctx))...) +} + +// Verify disabled `go-securesystemslib/dsse.Verifier` +func (a *SignerAdapter) Verify(_ context.Context, _, _ []byte) error { + return errors.New("Verify disabled") +} + +// Public implements `go-securesystemslib/dsse.Verifier` +func (a *SignerAdapter) Public() crypto.PublicKey { + return a.Pub +} + +// KeyID implements `go-securesystemslib/dsse.Verifier` +func (a SignerAdapter) KeyID() (string, error) { + return a.PubKeyID, nil +} + +// VerifierAdapter wraps a `sigstore/signature.Verifier`, making it compatible with `go-securesystemslib/dsse.Verifier`. +type VerifierAdapter struct { + SignatureVerifier signature.Verifier + Pub crypto.PublicKey + PubKeyID string +} + +// Verify implements `go-securesystemslib/dsse.Verifier` +func (a *VerifierAdapter) Verify(ctx context.Context, data, sig []byte) error { + return a.SignatureVerifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data), options.WithContext(ctx)) +} + +// Public implements `go-securesystemslib/dsse.Verifier` +func (a *VerifierAdapter) Public() crypto.PublicKey { + return a.Pub +} + +// KeyID implements `go-securesystemslib/dsse.Verifier` +func (a *VerifierAdapter) KeyID() (string, error) { + return a.PubKeyID, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/doc.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/doc.go new file mode 100644 index 00000000000..f58f18068f0 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/doc.go @@ -0,0 +1,17 @@ +// +// Copyright 2022 The Sigstore Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dsse contains handlers for Dead Simple Signing Envelopes +package dsse diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go new file mode 100644 index 00000000000..628e2413c0e --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go @@ -0,0 +1,152 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dsse + +import ( + "bytes" + "context" + "crypto" + "encoding/base64" + "encoding/json" + "io" + + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/sigstore/sigstore/pkg/signature" +) + +// WrapSigner returns a signature.Signer that uses the DSSE encoding format +func WrapSigner(s signature.Signer, payloadType string) signature.Signer { + return &wrappedSigner{ + s: s, + payloadType: payloadType, + } +} + +type wrappedSigner struct { + s signature.Signer + payloadType string +} + +// PublicKey returns the public key associated with the signer +func (w *wrappedSigner) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) { + return w.s.PublicKey(opts...) +} + +// SignMessage signs the provided stream in the reader using the DSSE encoding format +func (w *wrappedSigner) SignMessage(r io.Reader, opts ...signature.SignOption) ([]byte, error) { + p, err := io.ReadAll(r) + if err != nil { + return nil, err + } + pae := dsse.PAE(w.payloadType, p) + sig, err := w.s.SignMessage(bytes.NewReader(pae), opts...) + if err != nil { + return nil, err + } + + env := dsse.Envelope{ + PayloadType: w.payloadType, + Payload: base64.StdEncoding.EncodeToString(p), + Signatures: []dsse.Signature{ + { + Sig: base64.StdEncoding.EncodeToString(sig), + }, + }, + } + return json.Marshal(env) +} + +// WrapVerifier returns a signature.Verifier that uses the DSSE encoding format +func WrapVerifier(v signature.Verifier) signature.Verifier { + return &wrappedVerifier{ + v: v, + } +} + +type wrappedVerifier struct { + v signature.Verifier +} + +// PublicKey returns the public key associated with the verifier +func (w *wrappedVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) { + return w.v.PublicKey(opts...) 
+} + +// VerifySignature verifies the signature specified in an DSSE envelope +func (w *wrappedVerifier) VerifySignature(s, _ io.Reader, _ ...signature.VerifyOption) error { + sig, err := io.ReadAll(s) + if err != nil { + return err + } + + env := dsse.Envelope{} + if err := json.Unmarshal(sig, &env); err != nil { + return err + } + + pub, err := w.PublicKey() + if err != nil { + return err + } + verifier, err := dsse.NewEnvelopeVerifier(&VerifierAdapter{ + SignatureVerifier: w.v, + + Pub: pub, + PubKeyID: "", // We do not want to limit verification to a specific key. + }) + if err != nil { + return err + } + + _, err = verifier.Verify(context.Background(), &env) + return err +} + +// WrapSignerVerifier returns a signature.SignerVerifier that uses the DSSE encoding format +func WrapSignerVerifier(sv signature.SignerVerifier, payloadType string) signature.SignerVerifier { + signer := &wrappedSigner{ + payloadType: payloadType, + s: sv, + } + verifier := &wrappedVerifier{ + v: sv, + } + + return &wrappedSignerVerifier{ + signer: signer, + verifier: verifier, + } +} + +type wrappedSignerVerifier struct { + signer *wrappedSigner + verifier *wrappedVerifier +} + +// PublicKey returns the public key associated with the verifier +func (w *wrappedSignerVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) { + return w.signer.PublicKey(opts...) +} + +// VerifySignature verifies the signature specified in an DSSE envelope +func (w *wrappedSignerVerifier) VerifySignature(s, r io.Reader, opts ...signature.VerifyOption) error { + return w.verifier.VerifySignature(s, r, opts...) +} + +// SignMessage signs the provided stream in the reader using the DSSE encoding format +func (w *wrappedSignerVerifier) SignMessage(r io.Reader, opts ...signature.SignOption) ([]byte, error) { + return w.signer.SignMessage(r, opts...) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go new file mode 100644 index 00000000000..34ebd5a41e8 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go @@ -0,0 +1,186 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
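+
+// A hedged usage sketch for the multi-signature wrappers defined below (the
+// payload type is illustrative; s1, s2, v1, and v2 stand in for concrete
+// signature.Signer and signature.Verifier values obtained elsewhere):
+//
+//	signer := dsse.WrapMultiSigner("application/vnd.in-toto+json", s1, s2)
+//	verifier := dsse.WrapMultiVerifier("application/vnd.in-toto+json", 1, v1, v2)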
+
+package dsse
+
+import (
+	"context"
+	"crypto"
+	"encoding/json"
+	"errors"
+	"io"
+
+	"github.com/secure-systems-lab/go-securesystemslib/dsse"
+	"github.com/sigstore/sigstore/pkg/signature"
+)
+
+type wrappedMultiSigner struct {
+	sLAdapters  []dsse.Signer
+	payloadType string
+}
+
+// WrapMultiSigner returns a signature.Signer that uses the DSSE encoding format
+func WrapMultiSigner(payloadType string, sL ...signature.Signer) signature.Signer {
+	signerAdapterL := make([]dsse.Signer, 0, len(sL))
+	for _, s := range sL {
+		pub, err := s.PublicKey()
+		if err != nil {
+			return nil
+		}
+
+		keyID, err := dsse.SHA256KeyID(pub)
+		if err != nil {
+			keyID = ""
+		}
+
+		signerAdapter := &SignerAdapter{
+			SignatureSigner: s,
+			Pub:             pub,
+			PubKeyID:        keyID, // We do not want to limit verification to a specific key.
+		}
+
+		signerAdapterL = append(signerAdapterL, signerAdapter)
+	}
+
+	return &wrappedMultiSigner{
+		sLAdapters:  signerAdapterL,
+		payloadType: payloadType,
+	}
+}
+
+// PublicKey returns the public key associated with the signer
+func (wL *wrappedMultiSigner) PublicKey(_ ...signature.PublicKeyOption) (crypto.PublicKey, error) {
+	return nil, errors.New("not supported for multi signatures")
+}
+
+// SignMessage signs the provided stream in the reader using the DSSE encoding format
+func (wL *wrappedMultiSigner) SignMessage(r io.Reader, _ ...signature.SignOption) ([]byte, error) {
+	p, err := io.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	envSigner, err := dsse.NewEnvelopeSigner(wL.sLAdapters...)
+	if err != nil {
+		return nil, err
+	}
+
+	env, err := envSigner.SignPayload(context.Background(), wL.payloadType, p)
+	if err != nil {
+		return nil, err
+	}
+
+	return json.Marshal(env)
+}
+
+type wrappedMultiVerifier struct {
+	vLAdapters  []dsse.Verifier
+	threshold   int
+	payloadType string
+}
+
+// WrapMultiVerifier returns a signature.Verifier that uses the DSSE encoding format
+func WrapMultiVerifier(payloadType string, threshold int, vL ...signature.Verifier) signature.Verifier {
+	verifierAdapterL := make([]dsse.Verifier, 0, len(vL))
+	for _, v := range vL {
+		pub, err := v.PublicKey()
+		if err != nil {
+			return nil
+		}
+
+		keyID, err := dsse.SHA256KeyID(pub)
+		if err != nil {
+			keyID = ""
+		}
+
+		verifierAdapter := &VerifierAdapter{
+			SignatureVerifier: v,
+			Pub:               pub,
+			PubKeyID:          keyID, // We do not want to limit verification to a specific key.
+		}
+
+		verifierAdapterL = append(verifierAdapterL, verifierAdapter)
+	}
+
+	return &wrappedMultiVerifier{
+		vLAdapters:  verifierAdapterL,
+		payloadType: payloadType,
+		threshold:   threshold,
+	}
+}
+
+// PublicKey returns the public key associated with the signer
+func (wL *wrappedMultiVerifier) PublicKey(_ ...signature.PublicKeyOption) (crypto.PublicKey, error) {
+	return nil, errors.New("not supported for multi signatures")
+}
+
+// VerifySignature verifies the signature specified in an DSSE envelope
+func (wL *wrappedMultiVerifier) VerifySignature(s, _ io.Reader, _ ...signature.VerifyOption) error {
+	sig, err := io.ReadAll(s)
+	if err != nil {
+		return err
+	}
+
+	env := dsse.Envelope{}
+	if err := json.Unmarshal(sig, &env); err != nil {
+		return err
+	}
+
+	envVerifier, err := dsse.NewMultiEnvelopeVerifier(wL.threshold, wL.vLAdapters...)
+	if err != nil {
+		return err
+	}
+
+	_, err = envVerifier.Verify(context.Background(), &env)
+	return err
+}
+
+// WrapMultiSignerVerifier returns a signature.SignerVerifier that uses the DSSE encoding format
+func WrapMultiSignerVerifier(payloadType string, threshold int, svL ...signature.SignerVerifier) signature.SignerVerifier {
+	signerL := make([]signature.Signer, 0, len(svL))
+	verifierL := make([]signature.Verifier, 0, len(svL))
+	for _, sv := range svL {
+		signerL = append(signerL, sv)
+		verifierL = append(verifierL, sv)
+	}
+
+	sL := WrapMultiSigner(payloadType, signerL...)
+	vL := WrapMultiVerifier(payloadType, threshold, verifierL...)
+
+	return &wrappedMultiSignerVerifier{
+		signer:   sL,
+		verifier: vL,
+	}
+}
+
+type wrappedMultiSignerVerifier struct {
+	signer   signature.Signer
+	verifier signature.Verifier
+}
+
+// PublicKey returns the public key associated with the verifier
+func (w *wrappedMultiSignerVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) {
+	return w.signer.PublicKey(opts...)
+}
+
+// VerifySignature verifies the signature specified in an DSSE envelope
+func (w *wrappedMultiSignerVerifier) VerifySignature(s, r io.Reader, opts ...signature.VerifyOption) error {
+	return w.verifier.VerifySignature(s, r, opts...)
+}
+
+// SignMessage signs the provided stream in the reader using the DSSE encoding format
+func (w *wrappedMultiSignerVerifier) SignMessage(r io.Reader, opts ...signature.SignOption) ([]byte, error) {
+	return w.signer.SignMessage(r, opts...)
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
index 1122989ff65..50f432798d3 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
@@ -31,9 +31,6 @@ import (
 
 	"github.com/sigstore/sigstore/pkg/cryptoutils"
 	"github.com/sigstore/sigstore/pkg/signature/options"
-
-	// these ensure we have the implementations loaded
-	_ "golang.org/x/crypto/sha3"
 )
 
 // Signer creates digital signatures over a message using a specified key pair
diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/CONTRIBUTORS.md b/vendor/github.com/sigstore/timestamp-authority/v2/CONTRIBUTORS.md
new file mode 100644
index 00000000000..016e119f445
--- /dev/null
+++ b/vendor/github.com/sigstore/timestamp-authority/v2/CONTRIBUTORS.md
@@ -0,0 +1,132 @@
+# Contributing
+
+When contributing to this repository, please first discuss the change you wish
+to make via an [issue](https://github.com/sigstore/timestamp-authority/issues).
+
+## Pull Request Process
+
+1. Create an [issue](https://github.com/sigstore/timestamp-authority/issues)
+   outlining the fix or feature.
+2. Fork the timestamp-authority repository to your own GitHub account and clone it locally.
+3. Hack on your changes.
+4. Update the README.md with details of changes to any interface; this includes new environment
+   variables, exposed ports, useful file locations, CLI parameters and
+   new or changed configuration values.
+5. Correctly format your commit message; see [Commit Messages](#commit-message-guidelines)
+   below.
+6. Ensure that CI passes; if it fails, fix the failures.
+7. Every pull request requires a review from the [core timestamp-authority team](https://github.com/orgs/sigstore/teams/tsa-codeowners)
+   before merging.
+8.
+   commits as described in [Squash Commits](#squash-commits).
+
+## Commit Message Guidelines
+
+We follow the commit formatting recommendations found on [Chris Beams' How to Write a Git Commit Message article](https://chris.beams.io/posts/git-commit/).
+
+Well-formed commit messages not only help reviewers understand the nature of
+the Pull Request, but also assist the release process, where commit messages
+are used to generate release notes.
+
+A good example of a commit message would be as follows:
+
+```text
+Summarize changes in around 50 characters or less
+
+More detailed explanatory text, if necessary. Wrap it to about 72
+characters or so. In some contexts, the first line is treated as the
+subject of the commit and the rest of the text as the body. The
+blank line separating the summary from the body is critical (unless
+you omit the body entirely); various tools like `log`, `shortlog`
+and `rebase` can get confused if you run the two together.
+
+Explain the problem that this commit is solving. Focus on why you
+are making this change as opposed to how (the code explains that).
+Are there side effects or other unintuitive consequences of this
+change? Here's the place to explain them.
+
+Further paragraphs come after blank lines.
+
+ - Bullet points are okay, too
+
+ - Typically a hyphen or asterisk is used for the bullet, preceded
+   by a single space, with blank lines in between, but conventions
+   vary here
+
+If you use an issue tracker, put references to them at the bottom,
+like this:
+
+Resolves: #123
+See also: #456, #789
+```
+
+Note the `Resolves #123` tag; this references the issue raised and allows us to
+ensure issues are associated and closed when a pull request is merged.
+
+Please refer to [the GitHub help page on message types](https://help.github.com/articles/closing-issues-using-keywords/) for a complete list of issue references.
+
+## Squash Commits
+
+Should your pull request consist of more than one commit (perhaps due to
+a change being requested during the review cycle), please perform a git squash
+once a reviewer has approved your pull request.
+
+A squash can be performed as follows. Let's say you have the following commits:
+
+```text
+initial commit
+second commit
+final commit
+```
+
+Run the command below with the number set to the total commits you wish to
+squash (in our case 3 commits):
+
+```shell
+git rebase -i HEAD~3
+```
+
+Your default text editor will then open up and you will see the following:
+
+```shell
+pick eb36612 initial commit
+pick 9ac8968 second commit
+pick a760569 final commit
+
+# Rebase eb1429f..a760569 onto eb1429f (3 commands)
+```
+
+We want to rebase on top of our first commit, so we change the other two commits
+to `squash`:
+
+```shell
+pick eb36612 initial commit
+squash 9ac8968 second commit
+squash a760569 final commit
+```
+
+After this, should you wish to update your commit message to better summarise
+your entire pull request, run:
+
+```shell
+git commit --amend
+```
+
+You will then need to force push (assuming your initial commit(s) were posted
+to GitHub):
+
+```shell
+git push origin your-branch --force
+```
+
+Alternatively, a core member can squash your commits within GitHub.
+
+## DCO Signoff
+
+Make sure to sign the [Developer Certificate of
+Origin](https://git-scm.com/docs/git-commit#Documentation/git-commit.txt---signoff).
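+
+To add the sign-off, pass git's `-s`/`--signoff` flag when committing so the
+required `Signed-off-by` trailer is appended for you (the commit message below
+is just an example):
+
+```shell
+git commit -s -m "Describe your change"
+```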
+ +## Code of Conduct + +Sigstore Timestamp-Authority adheres to and enforces the [Contributor Covenant](http://contributor-covenant.org/version/1/4/) Code of Conduct. +Please take a moment to read the [CODE_OF_CONDUCT.md](/CODE_OF_CONDUCT.md) document. diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/COPYRIGHT.txt b/vendor/github.com/sigstore/timestamp-authority/v2/COPYRIGHT.txt new file mode 100644 index 00000000000..5f2c003d6d1 --- /dev/null +++ b/vendor/github.com/sigstore/timestamp-authority/v2/COPYRIGHT.txt @@ -0,0 +1,13 @@ +Copyright 2022 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/LICENSE b/vendor/github.com/sigstore/timestamp-authority/v2/LICENSE new file mode 100644 index 00000000000..f49a4e16e68 --- /dev/null +++ b/vendor/github.com/sigstore/timestamp-authority/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify.go b/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify.go new file mode 100644 index 00000000000..4f6c77c7925 --- /dev/null +++ b/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify.go @@ -0,0 +1,340 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
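+
+// Package verification verifies RFC 3161 timestamp responses (TSRs): it
+// checks the signature over the timestamp token against a trusted root and
+// intermediate chain, enforces the timestamping EKU rules on that chain,
+// compares the signed digest against the original artifact, and applies the
+// optional nonce, policy OID, certificate and Common Name expectations
+// carried in VerifyOpts.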
+package verification
+
+import (
+	"bytes"
+	"crypto/x509"
+	"encoding/asn1"
+	"fmt"
+	"hash"
+	"io"
+	"math/big"
+
+	"github.com/digitorus/pkcs7"
+	"github.com/digitorus/timestamp"
+	"github.com/pkg/errors"
+)
+
+var (
+	// EKUOID is the Extended Key Usage OID, per RFC 5280
+	EKUOID = asn1.ObjectIdentifier{2, 5, 29, 37}
+)
+
+// VerifyOpts contains verification options for an RFC 3161 timestamp
+type VerifyOpts struct {
+	// OID verifies that the TSR's OID has an expected value. Optional, used when
+	// an alternative OID was passed with a request to the TSA
+	OID asn1.ObjectIdentifier
+	// TSACertificate verifies that the TSR uses the TSACertificate as expected. Optional if the TSR contains the TSA certificate
+	TSACertificate *x509.Certificate
+	// Intermediates verifies the TSR's certificate. Optional, used for chain building
+	Intermediates []*x509.Certificate
+	// Roots is the set of trusted root certificates that verifies the TSR's certificate
+	Roots []*x509.Certificate
+	// Nonce verifies that the TSR contains the expected nonce. Optional, used when
+	// an optional nonce was passed with a request to the TSA
+	Nonce *big.Int
+	// CommonName verifies that the TSR certificate subject's Common Name matches the expected value. Optional
+	CommonName string
+}
+
+// Verify the TSR's certificate identifier matches a provided TSA certificate
+func verifyESSCertID(tsaCert *x509.Certificate, opts VerifyOpts) error {
+	if opts.TSACertificate == nil {
+		return nil
+	}
+
+	if !bytes.Equal(opts.TSACertificate.RawIssuer, tsaCert.RawIssuer) {
+		return fmt.Errorf("TSR cert issuer does not match provided TSA cert issuer")
+	}
+
+	if opts.TSACertificate.SerialNumber.Cmp(tsaCert.SerialNumber) != 0 {
+		return fmt.Errorf("TSR cert serial number does not match provided TSA cert serial number")
+	}
+
+	return nil
+}
+
+// Verify the leaf certificate's subject Common Name matches a provided Common Name
+func verifySubjectCommonName(cert *x509.Certificate, opts VerifyOpts) error {
+	if opts.CommonName == "" {
+		return nil
+	}
+
+	if cert.Subject.CommonName != opts.CommonName {
+		return fmt.Errorf("the certificate's subject Common Name %s does not match the provided Common Name %s", cert.Subject.CommonName, opts.CommonName)
+	}
+	return nil
+}
+
+// If embedded in the TSR, verify the TSR's leaf certificate matches a provided TSA certificate
+func verifyEmbeddedLeafCert(tsaCert *x509.Certificate, opts VerifyOpts) error {
+	if opts.TSACertificate != nil && !opts.TSACertificate.Equal(tsaCert) {
+		return fmt.Errorf("certificate embedded in the TSR does not match the provided TSA certificate")
+	}
+	return nil
+}
+
+// Verify the leaf's EKU is set to critical, per RFC 3161 2.3
+func verifyLeafCertCriticalEKU(cert *x509.Certificate) error {
+	var criticalEKU bool
+	for _, ext := range cert.Extensions {
+		if ext.Id.Equal(EKUOID) {
+			criticalEKU = ext.Critical
+			break
+		}
+	}
+	if !criticalEKU {
+		return errors.New("certificate must set EKU to critical")
+	}
+	return nil
+}
+
+func verifyLeafCert(ts timestamp.Timestamp, opts VerifyOpts) error {
+	if len(ts.Certificates) == 0 && opts.TSACertificate == nil {
+		return fmt.Errorf("leaf certificate must be present in the TSR or as a verify option")
+	}
+
+	errMsg := "failed to verify TSA certificate"
+
+	var leafCert *x509.Certificate
+	if len(ts.Certificates) != 0 {
+		for _, c := range ts.Certificates {
+			if !c.IsCA {
+				leafCert = c
+				break
+			}
+		}
+		if leafCert == nil {
+			return fmt.Errorf("no leaf certificate found in chain")
+		}
+
+		err := verifyEmbeddedLeafCert(leafCert, opts)
+		if err != nil {
+			return fmt.Errorf("%s: %w", errMsg, err)
+		}
+	} else {
+		leafCert = opts.TSACertificate
+	}
+
+	err := verifyLeafCertCriticalEKU(leafCert)
+	if err != nil {
+		return fmt.Errorf("%s: %w", errMsg, err)
+	}
+
+	err = verifyESSCertID(leafCert, opts)
+	if err != nil {
+		return fmt.Errorf("%s: %w", errMsg, err)
+	}
+
+	err = verifySubjectCommonName(leafCert, opts)
+	if err != nil {
+		return fmt.Errorf("%s: %w", errMsg, err)
+	}
+
+	// verifies that the leaf certificate and any intermediate certificates
+	// have EKU set to only time stamping usage
+	err = verifyLeafAndIntermediatesTimestampingEKU(leafCert, opts)
+	if err != nil {
+		return fmt.Errorf("failed to verify EKU on leaf certificate: %w", err)
+	}
+
+	return nil
+}
+
+func verifyLeafExtendedKeyUsage(cert *x509.Certificate) error {
+	certEKULen := len(cert.ExtKeyUsage)
+	if certEKULen != 1 {
+		return fmt.Errorf("certificate has %d extended key usages, expected only one", certEKULen)
+	}
+
+	if cert.ExtKeyUsage[0] != x509.ExtKeyUsageTimeStamping {
+		return fmt.Errorf("leaf certificate EKU is not set to TimeStamping as required")
+	}
+	return nil
+}
+
+func verifyIntermediateExtendedKeyUsage(cert *x509.Certificate) error {
+	// If no EKU specified it means unrestricted usage
+	if len(cert.ExtKeyUsage) == 0 {
+		return nil
+	}
+
+	allowsTimestampingUse := false
+	for _, eku := range cert.ExtKeyUsage {
+		if eku == x509.ExtKeyUsageTimeStamping || eku == x509.ExtKeyUsageAny {
+			allowsTimestampingUse = true
+			break
+		}
+	}
+
+	if !allowsTimestampingUse {
+		return errors.New("intermediate certificate does not allow Timestamping usage")
+	}
+
+	return nil
+}
+
+// Verify the leaf and intermediate certificates (called "EKU chaining") all
+// have the appropriate extended key usage set.
+// Leaf certificates must have exactly one EKU set to Timestamping.
+// Intermediates can have no EKU (unrestricted) or multiple EKUs,
+// which need to include Timestamping or UsageAny.
+func verifyLeafAndIntermediatesTimestampingEKU(leafCert *x509.Certificate, opts VerifyOpts) error {
+	err := verifyLeafExtendedKeyUsage(leafCert)
+	if err != nil {
+		return fmt.Errorf("failed to verify EKU on leaf certificate: %w", err)
+	}
+
+	for _, cert := range opts.Intermediates {
+		err := verifyIntermediateExtendedKeyUsage(cert)
+		if err != nil {
+			return fmt.Errorf("failed to verify EKU on intermediate certificate: %w", err)
+		}
+	}
+	return nil
+}
+
+// Verify the OID of the TSR matches an expected OID
+func verifyOID(oid []int, opts VerifyOpts) error {
+	if opts.OID == nil {
+		return nil
+	}
+	responseOID := opts.OID
+	if len(oid) != len(responseOID) {
+		return fmt.Errorf("OID lengths do not match")
+	}
+	for i, v := range oid {
+		if v != responseOID[i] {
+			return fmt.Errorf("OID content does not match")
+		}
+	}
+	return nil
+}
+
+// Verify the nonce - mostly important for when the response is first returned
+func verifyNonce(requestNonce *big.Int, opts VerifyOpts) error {
+	if opts.Nonce == nil {
+		return nil
+	}
+	if opts.Nonce.Cmp(requestNonce) != 0 {
+		return fmt.Errorf("incoming nonce %d does not match TSR nonce %d", requestNonce, opts.Nonce)
+	}
+	return nil
+}
+
+// VerifyTimestampResponse verifies the timestamp response using a timestamp certificate chain.
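+// tsrBytes is the DER-encoded response returned by the TSA, artifact is the
+// original content that was timestamped, and opts carries the trust anchors
+// plus the optional nonce, OID and Common Name expectations; on success the
+// parsed timestamp is returned so callers can inspect the signed time.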
+func VerifyTimestampResponse(tsrBytes []byte, artifact io.Reader, opts VerifyOpts) (*timestamp.Timestamp, error) {
+	// Verify the status of the TSR does not contain an error
+	// handled by the timestamp.ParseResponse function
+	ts, err := timestamp.ParseResponse(tsrBytes)
+	if err != nil {
+		pe := timestamp.ParseError("")
+		if errors.As(err, &pe) {
+			return nil, fmt.Errorf("timestamp response is not valid: %w", err)
+		}
+		return nil, fmt.Errorf("error parsing response into Timestamp: %w", err)
+	}
+
+	// verify the timestamp response signature using the provided certificate pool
+	if err = verifyTSRWithChain(ts, opts); err != nil {
+		return nil, err
+	}
+
+	if err = verifyNonce(ts.Nonce, opts); err != nil {
+		return nil, err
+	}
+
+	if err = verifyOID(ts.Policy, opts); err != nil {
+		return nil, err
+	}
+
+	if err = verifyLeafCert(*ts, opts); err != nil {
+		return nil, err
+	}
+
+	// verify the hash in the timestamp response matches the artifact hash
+	if err = verifyHashedMessages(ts.HashAlgorithm.New(), ts.HashedMessage, artifact); err != nil {
+		return nil, err
+	}
+
+	// if the parsed timestamp is verified, return the timestamp
+	return ts, nil
+}
+
+func verifyTSRWithChain(ts *timestamp.Timestamp, opts VerifyOpts) error {
+	p7Message, err := pkcs7.Parse(ts.RawToken)
+	if err != nil {
+		return fmt.Errorf("error parsing hashed message: %w", err)
+	}
+
+	if len(opts.Roots) == 0 {
+		return fmt.Errorf("no root certificates provided for verifying the certificate chain")
+	}
+	rootCertPool := x509.NewCertPool()
+	for _, cert := range opts.Roots {
+		if cert != nil {
+			rootCertPool.AddCert(cert)
+		}
+	}
+	if rootCertPool.Equal(x509.NewCertPool()) {
+		return fmt.Errorf("no valid root certificates provided for verifying the certificate chain")
+	}
+	intermediateCertPool := x509.NewCertPool()
+	for _, cert := range opts.Intermediates {
+		if cert != nil {
+			intermediateCertPool.AddCert(cert)
+		}
+	}
+
+	x509Opts := x509.VerifyOptions{
+		Roots:         rootCertPool,
+		Intermediates: intermediateCertPool,
+	}
+
+	// if the PKCS7 object does not have any certificates set in the
+	// Certificates field, the VerifyWithChain method will fail because it will be
+	// unable to find a leaf certificate associated with a signer.
Since the + // leaf certificate issuer and serial number information is already part of + // the PKCS7 object, adding the leaf certificate to the Certificates field + // will allow verification to pass + if p7Message.Certificates == nil && opts.TSACertificate != nil { + p7Message.Certificates = []*x509.Certificate{opts.TSACertificate} + } + + err = p7Message.VerifyWithOpts(x509Opts) + if err != nil { + return fmt.Errorf("error while verifying with chain: %w", err) + } + + return nil +} + +// Verify that the TSR's hashed message matches the digest of the artifact to be timestamped +func verifyHashedMessages(hashAlg hash.Hash, hashedMessage []byte, artifactReader io.Reader) error { + h := hashAlg + if _, err := io.Copy(h, artifactReader); err != nil { + return fmt.Errorf("failed to create hash %w", err) + } + localHashedMsg := h.Sum(nil) + + if !bytes.Equal(localHashedMsg, hashedMessage) { + return fmt.Errorf("hashed messages don't match") + } + + return nil +} diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify_request.go b/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify_request.go new file mode 100644 index 00000000000..ba7511d7551 --- /dev/null +++ b/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify_request.go @@ -0,0 +1,47 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verification + +import ( + "crypto" + "fmt" + + "github.com/digitorus/timestamp" + "github.com/pkg/errors" +) + +var ErrWeakHashAlg = errors.New("weak hash algorithm: must be SHA-256, SHA-384, or SHA-512") +var ErrUnsupportedHashAlg = errors.New("unsupported hash algorithm") +var ErrInconsistentDigestLength = errors.New("digest length inconsistent with specified hash algorithm") + +func VerifyRequest(ts *timestamp.Request) error { + // only SHA-1, SHA-256, SHA-384, and SHA-512 are supported by the underlying library + switch ts.HashAlgorithm { + case crypto.SHA1: + return ErrWeakHashAlg + case crypto.SHA256, crypto.SHA384, crypto.SHA512: + default: + return ErrUnsupportedHashAlg + } + + expectedDigestLength := ts.HashAlgorithm.Size() + actualDigestLength := len(ts.HashedMessage) + + if actualDigestLength != expectedDigestLength { + return fmt.Errorf("%w: expected %d bytes, got %d bytes", ErrInconsistentDigestLength, expectedDigestLength, actualDigestLength) + } + + return nil +} diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml index 65dc2850377..792db361813 100644 --- a/vendor/github.com/sirupsen/logrus/.golangci.yml +++ b/vendor/github.com/sirupsen/logrus/.golangci.yml @@ -1,40 +1,67 @@ +version: "2" run: - # do not run on test files yet tests: false - -# all available settings of specific linters -linters-settings: - errcheck: - # report about not checking of errors in type assetions: `a := b.(MyStruct)`; - # default is false: such cases aren't reported by default. 
- check-type-assertions: false - - # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; - # default is false: such cases aren't reported by default. - check-blank: false - - lll: - line-length: 100 - tab-width: 4 - - prealloc: - simple: false - range-loops: false - for-loops: false - - whitespace: - multi-if: false # Enforces newlines (or comments) after every multi-line if statement - multi-func: false # Enforces newlines (or comments) after every multi-line function signature - linters: enable: - - megacheck - - govet + - asasalint + - asciicheck + - bidichk + - bodyclose + - contextcheck + - durationcheck + - errchkjson + - errorlint + - exhaustive + - gocheckcompilerdirectives + - gochecksumtype + - gosec + - gosmopolitan + - loggercheck + - makezero + - musttag + - nilerr + - nilnesserr + - noctx + - protogetter + - reassign + - recvcheck + - rowserrcheck + - spancheck + - sqlclosecheck + - testifylint + - unparam + - zerologlint disable: - - maligned - prealloc - disable-all: false - presets: - - bugs - - unused - fast: false + settings: + errcheck: + check-type-assertions: false + check-blank: false + lll: + line-length: 100 + tab-width: 4 + prealloc: + simple: false + range-loops: false + for-loops: false + whitespace: + multi-if: false + multi-func: false + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md index 7567f612898..098608ff4b4 100644 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -37,7 +37,7 @@ Features: # 1.6.0 Fixes: * end of line cleanup - * revert the entry concurrency bug fix whic leads to deadlock under some circumstances + * revert the entry concurrency bug fix which leads to deadlock under some circumstances * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 Features: @@ -129,7 +129,7 @@ This new release introduces: which is mostly useful for logger wrapper * a fix reverting the immutability of the entry given as parameter to the hooks a new configuration field of the json formatter in order to put all the fields - in a nested dictionnary + in a nested dictionary * a new SetOutput method in the Logger * a new configuration of the textformatter to configure the name of the default keys * a new configuration of the text formatter to disable the level truncation diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index d1d4a85fd75..cc5dab7eb78 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -1,4 +1,4 @@ -# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Go 
Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. @@ -40,7 +40,7 @@ plain text): ![Colored](http://i.imgur.com/PY7qMwd.png) -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +With `logrus.SetFormatter(&logrus.JSONFormatter{})`, for easy parsing by logstash or Splunk: ```text @@ -60,9 +60,9 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} "time":"2014-03-10 19:57:38.562543128 -0400 EDT"} ``` -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +With the default `logrus.SetFormatter(&logrus.TextFormatter{})` when a TTY is not attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: +[logfmt](https://pkg.go.dev/github.com/kr/logfmt) format: ```text time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 @@ -75,17 +75,18 @@ time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x20822 To ensure this behaviour even if a TTY is attached, set your formatter as follows: ```go - log.SetFormatter(&log.TextFormatter{ - DisableColors: true, - FullTimestamp: true, - }) +logrus.SetFormatter(&logrus.TextFormatter{ + DisableColors: true, + FullTimestamp: true, +}) ``` #### Logging Method Name If you wish to add the calling method as a field, instruct the logger via: + ```go -log.SetReportCaller(true) +logrus.SetReportCaller(true) ``` This adds the caller as 'method' like so: @@ -100,11 +101,11 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr Note that this does add measurable overhead - the cost will depend on the version of Go, but is between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your environment via benchmarks: -``` + +```bash go test -bench=.*CallerTracing ``` - #### Case-sensitivity The organization's name was changed to lower-case--and this will not be changed @@ -118,12 +119,10 @@ The simplest way to use Logrus is simply the package-level exported logger: ```go package main -import ( - log "github.com/sirupsen/logrus" -) +import "github.com/sirupsen/logrus" func main() { - log.WithFields(log.Fields{ + logrus.WithFields(logrus.Fields{ "animal": "walrus", }).Info("A walrus appears") } @@ -139,6 +138,7 @@ package main import ( "os" + log "github.com/sirupsen/logrus" ) @@ -190,26 +190,27 @@ package main import ( "os" + "github.com/sirupsen/logrus" ) // Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() +var logger = logrus.New() func main() { // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stdout + // exported logger. See Godoc. 
+ logger.Out = os.Stdout // You could set this to any `io.Writer` such as a file // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) // if err == nil { - // log.Out = file + // logger.Out = file // } else { - // log.Info("Failed to log to file, using default stderr") + // logger.Info("Failed to log to file, using default stderr") // } - log.WithFields(logrus.Fields{ + logger.WithFields(logrus.Fields{ "animal": "walrus", "size": 10, }).Info("A group of walrus emerges from the ocean") @@ -219,12 +220,12 @@ func main() { #### Fields Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +long, unparseable error messages. For example, instead of: `logrus.Fatalf("Failed to send event %s to topic %s with key %d")`, you should log the much more discoverable: ```go -log.WithFields(log.Fields{ +logrus.WithFields(logrus.Fields{ "event": event, "topic": topic, "key": key, @@ -245,12 +246,12 @@ seen as a hint you should add a field, however, you can still use the Often it's helpful to have fields _always_ attached to log statements in an application or parts of one. For example, you may want to always log the `request_id` and `user_ip` in the context of a request. Instead of writing -`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +`logger.WithFields(logrus.Fields{"request_id": request_id, "user_ip": user_ip})` on every line, you can create a `logrus.Entry` to pass around instead: ```go -requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) -requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger := logger.WithFields(logrus.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") // will log request_id and user_ip requestLogger.Warn("something not great happened") ``` @@ -264,28 +265,31 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in `init`: ```go +package main + import ( - log "github.com/sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" - logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" "log/syslog" + + "github.com/sirupsen/logrus" + airbrake "gopkg.in/gemnasium/logrus-airbrake-hook.v2" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" ) func init() { // Use the Airbrake hook to report errors that have Error severity or above to // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) + logrus.AddHook(airbrake.NewHook(123, "xyz", "production")) hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") if err != nil { - log.Error("Unable to connect to local syslog daemon") + logrus.Error("Unable to connect to local syslog daemon") } else { - log.AddHook(hook) + logrus.AddHook(hook) } } ``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). +Note: Syslog hooks also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). 
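+
+Writing your own hook only requires the two-method `Hook` interface
+(`Levels` and `Fire`). As a rough, illustrative sketch (the `errorMirrorHook`
+name and its behavior are made up for this example):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/sirupsen/logrus"
+)
+
+// errorMirrorHook mirrors error-level (and above) entries to stdout.
+type errorMirrorHook struct{}
+
+// Levels restricts the hook to error, fatal and panic entries.
+func (h *errorMirrorHook) Levels() []logrus.Level {
+	return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
+}
+
+// Fire receives each matching entry, formatted with the logger's formatter.
+func (h *errorMirrorHook) Fire(entry *logrus.Entry) error {
+	line, err := entry.String()
+	if err != nil {
+		return err
+	}
+	fmt.Print(line) // forward the formatted entry wherever you need it
+	return nil
+}
+
+func main() {
+	logrus.AddHook(&errorMirrorHook{})
+	logrus.Error("something failed") // handled by the hook as well
+}
+```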
A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) @@ -295,15 +299,15 @@ A list of currently known service hooks can be found in this wiki [page](https:/ Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. ```go -log.Trace("Something very low level.") -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") +logrus.Trace("Something very low level.") +logrus.Debug("Useful debugging information.") +logrus.Info("Something noteworthy happened!") +logrus.Warn("You should probably take a look at this.") +logrus.Error("Something failed but I'm not quitting.") // Calls os.Exit(1) after logging -log.Fatal("Bye.") +logrus.Fatal("Bye.") // Calls panic() after logging -log.Panic("I'm bailing.") +logrus.Panic("I'm bailing.") ``` You can set the logging level on a `Logger`, then it will only log entries with @@ -311,13 +315,13 @@ that severity or anything above it: ```go // Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) +logrus.SetLevel(logrus.InfoLevel) ``` -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +It may be useful to set `logrus.Level = logrus.DebugLevel` in a debug or verbose environment if your application has that. -Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). +Note: If you want different log levels for global (`logrus.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). #### Entries @@ -340,17 +344,17 @@ could do: ```go import ( - log "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus" ) func init() { // do something here to set environment depending on an environment variable // or command-line flag if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) + logrus.SetFormatter(&logrus.JSONFormatter{}) } else { // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) + logrus.SetFormatter(&logrus.TextFormatter{}) } } ``` @@ -372,11 +376,11 @@ The built-in logging formatters are: * When colors are enabled, levels are truncated to 4 characters by default. To disable truncation set the `DisableLevelTruncation` field to `true`. * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). + * All options are listed in the [generated docs](https://pkg.go.dev/github.com/sirupsen/logrus#TextFormatter). * `logrus.JSONFormatter`. Logs fields as JSON. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + * All options are listed in the [generated docs](https://pkg.go.dev/github.com/sirupsen/logrus#JSONFormatter). -Third party logging formatters: +Third-party logging formatters: * [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. 
* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). @@ -384,7 +388,7 @@ Third party logging formatters: * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. * [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. * [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. -* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files. +* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Save log to files. * [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. You can define your formatter by implementing the `Formatter` interface, @@ -393,10 +397,9 @@ requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a default ones (see Entries section above): ```go -type MyJSONFormatter struct { -} +type MyJSONFormatter struct{} -log.SetFormatter(new(MyJSONFormatter)) +logrus.SetFormatter(new(MyJSONFormatter)) func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { // Note this doesn't include Time, Level and Message which are available on @@ -455,17 +458,18 @@ entries. It should not be a feature of the application-level logger. #### Testing -Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: +Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: * decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook * a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): ```go import( + "testing" + "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" - "testing" ) func TestSomething(t*testing.T){ @@ -486,15 +490,15 @@ func TestSomething(t*testing.T){ Logrus can register one or more functions that will be called when any `fatal` level message is logged. The registered handlers will be executed before logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need -to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. +to gracefully shut down. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. -``` -... +```go +// ... handler := func() { - // gracefully shutdown something... + // gracefully shut down something... } logrus.RegisterExitHandler(handler) -... +// ... ``` #### Thread safety @@ -502,7 +506,7 @@ logrus.RegisterExitHandler(handler) By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. 
-Situation when locking is not needed includes: +Situations when locking is not needed include: * You have no hooks registered, or hooks calling is already thread-safe. diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml index df9d65c3a5b..e90f09ea68c 100644 --- a/vendor/github.com/sirupsen/logrus/appveyor.yml +++ b/vendor/github.com/sirupsen/logrus/appveyor.yml @@ -1,14 +1,12 @@ -version: "{build}" +# Minimal stub to satisfy AppVeyor CI +version: 1.0.{build} platform: x64 -clone_folder: c:\gopath\src\github.com\sirupsen\logrus -environment: - GOPATH: c:\gopath +shallow_clone: true + branches: only: - master -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version + - main + build_script: - - go get -t - - go test + - echo "No-op build to satisfy AppVeyor CI" diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index 71cdbbc35d2..71d796d0b13 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -34,13 +34,15 @@ func init() { minimumCallerDepth = 1 } -// Defines the key when adding errors using WithError. +// ErrorKey defines the key when adding errors using [WithError], [Logger.WithError]. var ErrorKey = "error" -// An entry is the final or intermediate Logrus logging entry. It contains all +// Entry is the final or intermediate Logrus logging entry. It contains all // the fields passed with WithField{,s}. It's finally logged when Trace, Debug, // Info, Warn, Error, Fatal or Panic is called on it. These objects can be // reused and passed around as much as you wish to avoid field duplication. +// +//nolint:recvcheck // the methods of "Entry" use pointer receiver and non-pointer receiver. type Entry struct { Logger *Logger @@ -86,12 +88,12 @@ func (entry *Entry) Dup() *Entry { return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} } -// Returns the bytes representation of this entry from the formatter. +// Bytes returns the bytes representation of this entry from the formatter. func (entry *Entry) Bytes() ([]byte, error) { return entry.Logger.Formatter.Format(entry) } -// Returns the string representation from the reader and ultimately the +// String returns the string representation from the reader and ultimately the // formatter. func (entry *Entry) String() (string, error) { serialized, err := entry.Bytes() @@ -102,12 +104,13 @@ func (entry *Entry) String() (string, error) { return str, nil } -// Add an error as single field (using the key defined in ErrorKey) to the Entry. +// WithError adds an error as single field (using the key defined in [ErrorKey]) +// to the Entry. func (entry *Entry) WithError(err error) *Entry { return entry.WithField(ErrorKey, err) } -// Add a context to the Entry. +// WithContext adds a context to the Entry. func (entry *Entry) WithContext(ctx context.Context) *Entry { dataCopy := make(Fields, len(entry.Data)) for k, v := range entry.Data { @@ -116,12 +119,12 @@ func (entry *Entry) WithContext(ctx context.Context) *Entry { return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} } -// Add a single field to the Entry. +// WithField adds a single field to the Entry. func (entry *Entry) WithField(key string, value interface{}) *Entry { return entry.WithFields(Fields{key: value}) } -// Add a map of fields to the Entry. +// WithFields adds a map of fields to the Entry. 
func (entry *Entry) WithFields(fields Fields) *Entry { data := make(Fields, len(entry.Data)+len(fields)) for k, v := range entry.Data { @@ -150,7 +153,7 @@ func (entry *Entry) WithFields(fields Fields) *Entry { return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} } -// Overrides the time of the Entry. +// WithTime overrides the time of the Entry. func (entry *Entry) WithTime(t time.Time) *Entry { dataCopy := make(Fields, len(entry.Data)) for k, v := range entry.Data { @@ -204,7 +207,7 @@ func getCaller() *runtime.Frame { // If the caller isn't part of this package, we're done if pkg != logrusPackage { - return &f //nolint:scopelint + return &f } } @@ -432,7 +435,7 @@ func (entry *Entry) Panicln(args ...interface{}) { entry.Logln(PanicLevel, args...) } -// Sprintlnn => Sprint no newline. This is to get the behavior of how +// sprintlnn => Sprint no newline. This is to get the behavior of how // fmt.Sprintln where spaces are always added between operands, regardless of // their type. Instead of vendoring the Sprintln implementation to spare a // string allocation, we do the simplest thing. diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go index 3f151cdc392..9ab978a4578 100644 --- a/vendor/github.com/sirupsen/logrus/hooks.go +++ b/vendor/github.com/sirupsen/logrus/hooks.go @@ -1,16 +1,16 @@ package logrus -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not +// Hook describes hooks to be fired when logging on the logging levels returned from +// [Hook.Levels] on your implementation of the interface. Note that this is not // fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for +// functionality yourself if your call is non-blocking, and you don't wish for // the logging calls for levels returned from `Levels()` to block. type Hook interface { Levels() []Level Fire(*Entry) error } -// Internal type for storing the hooks on a logger instance. +// LevelHooks is an internal type for storing the hooks on a logger instance. type LevelHooks map[Level][]Hook // Add a hook to an instance of logger. This is called with diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index 5ff0aef6d3f..f5b8c439ee8 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -72,16 +72,16 @@ func (mw *MutexWrap) Disable() { mw.disabled = true } -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just +// New Creates a new logger. Configuration should be set by changing [Formatter], +// Out and Hooks directly on the default Logger instance. You can also just // instantiate your own: // -// var log = &logrus.Logger{ -// Out: os.Stderr, -// Formatter: new(logrus.TextFormatter), -// Hooks: make(logrus.LevelHooks), -// Level: logrus.DebugLevel, -// } +// var log = &logrus.Logger{ +// Out: os.Stderr, +// Formatter: new(logrus.TextFormatter), +// Hooks: make(logrus.LevelHooks), +// Level: logrus.DebugLevel, +// } // // It's recommended to make this a global instance called `log`. 
 func New() *Logger {
@@ -118,30 +118,30 @@ func (logger *Logger) WithField(key string, value interface{}) *Entry {
 	return entry.WithField(key, value)
 }
 
-// Adds a struct of fields to the log entry. All it does is call `WithField` for
-// each `Field`.
+// WithFields adds a struct of fields to the log entry. It calls [Entry.WithField]
+// for each Field.
 func (logger *Logger) WithFields(fields Fields) *Entry {
 	entry := logger.newEntry()
 	defer logger.releaseEntry(entry)
 	return entry.WithFields(fields)
 }
 
-// Add an error as single field to the log entry. All it does is call
-// `WithError` for the given `error`.
+// WithError adds an error as single field to the log entry. It calls
+// [Entry.WithError] for the given error.
 func (logger *Logger) WithError(err error) *Entry {
 	entry := logger.newEntry()
 	defer logger.releaseEntry(entry)
 	return entry.WithError(err)
 }
 
-// Add a context to the log entry.
+// WithContext adds a context to the log entry.
 func (logger *Logger) WithContext(ctx context.Context) *Entry {
 	entry := logger.newEntry()
 	defer logger.releaseEntry(entry)
 	return entry.WithContext(ctx)
 }
 
-// Overrides the time of the log entry.
+// WithTime overrides the time of the log entry.
 func (logger *Logger) WithTime(t time.Time) *Entry {
 	entry := logger.newEntry()
 	defer logger.releaseEntry(entry)
@@ -347,9 +347,9 @@ func (logger *Logger) Exit(code int) {
 	logger.ExitFunc(code)
 }
 
-//When file is opened with appending mode, it's safe to
-//write concurrently to a file (within 4k message on Linux).
-//In these cases user can choose to disable the lock.
+// SetNoLock disables the lock for situations where a file is opened with
+// appending mode, and safe for concurrent writes to the file (within 4k
+// message on Linux). In these cases user can choose to disable the lock.
 func (logger *Logger) SetNoLock() {
 	logger.mu.Disable()
 }
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
index 2f16224cb9f..37fc4fef85a 100644
--- a/vendor/github.com/sirupsen/logrus/logrus.go
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -6,13 +6,15 @@ import (
 	"strings"
 )
 
-// Fields type, used to pass to `WithFields`.
+// Fields type, used to pass to [WithFields].
 type Fields map[string]interface{}
 
 // Level type
+//
+//nolint:recvcheck // the methods of "Level" use pointer receiver and non-pointer receiver.
 type Level uint32
 
-// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+// Convert the Level to a string. E.g. [PanicLevel] becomes "panic".
 func (level Level) String() string {
 	if b, err := level.MarshalText(); err == nil {
 		return string(b)
 	}
@@ -77,7 +79,7 @@ func (level Level) MarshalText() ([]byte, error) {
 	return nil, fmt.Errorf("not a valid logrus level %d", level)
 }
 
-// A constant exposing all logging levels
+// AllLevels exposes all logging levels.
 var AllLevels = []Level{
 	PanicLevel,
 	FatalLevel,
@@ -119,8 +121,8 @@ var (
 )
 
 // StdLogger is what your logrus-enabled library should take, that way
-// it'll accept a stdlib logger and a logrus logger. There's no standard
-// interface, this is the closest we get, unfortunately.
+// it'll accept a stdlib logger ([log.Logger]) and a logrus logger.
+// There's no standard interface, so this is the closest we get, unfortunately.
type StdLogger interface { Print(...interface{}) Printf(string, ...interface{}) @@ -135,7 +137,8 @@ type StdLogger interface { Panicln(...interface{}) } -// The FieldLogger interface generalizes the Entry and Logger types +// FieldLogger extends the [StdLogger] interface, generalizing +// the [Entry] and [Logger] types. type FieldLogger interface { WithField(key string, value interface{}) *Entry WithFields(fields Fields) *Entry @@ -176,8 +179,9 @@ type FieldLogger interface { // IsPanicEnabled() bool } -// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is -// here for consistancy. Do not use. Use Logger or Entry instead. +// Ext1FieldLogger (the first extension to [FieldLogger]) is superfluous, it is +// here for consistency. Do not use. Use [FieldLogger], [Logger] or [Entry] +// instead. type Ext1FieldLogger interface { FieldLogger Tracef(format string, args ...interface{}) diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go index be2c6efe5ed..6dfeb18b10e 100644 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -306,6 +306,7 @@ func (f *TextFormatter) needsQuoting(text string) bool { return false } for _, ch := range text { + //nolint:staticcheck // QF1001: could apply De Morgan's law if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') || @@ -334,6 +335,6 @@ func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { if !f.needsQuoting(stringVal) { b.WriteString(stringVal) } else { - b.WriteString(fmt.Sprintf("%q", stringVal)) + fmt.Fprintf(b, "%q", stringVal) } } diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE b/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE new file mode 100644 index 00000000000..85541be2e1b --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2024 The Update Framework Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE b/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE new file mode 100644 index 00000000000..09005219963 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE @@ -0,0 +1,9 @@ +Copyright 2024 The Update Framework Authors + +Apache 2.0 License +Copyright 2024 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/).
+ +SPDX-License-Identifier: Apache-2.0 diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go new file mode 100644 index 00000000000..2123e57dfc3 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go @@ -0,0 +1,144 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package config + +import ( + "fmt" + "net/http" + "net/url" + "os" + "time" + + "github.com/cenkalti/backoff/v5" + "github.com/theupdateframework/go-tuf/v2/metadata/fetcher" +) + +type UpdaterConfig struct { + // TUF configuration + MaxRootRotations int64 + MaxDelegations int + RootMaxLength int64 + TimestampMaxLength int64 + SnapshotMaxLength int64 + TargetsMaxLength int64 + // Updater configuration + Fetcher fetcher.Fetcher + LocalTrustedRoot []byte + LocalMetadataDir string + LocalTargetsDir string + RemoteMetadataURL string + RemoteTargetsURL string + DisableLocalCache bool + PrefixTargetsWithHash bool + // UnsafeLocalMode only uses the metadata as written on disk + // if the metadata is incomplete, calling updater.Refresh will fail + UnsafeLocalMode bool +} + +// New creates a new UpdaterConfig instance used by the Updater to +// store configuration +func New(remoteURL string, rootBytes []byte) (*UpdaterConfig, error) { + // Default URL for target files - /targets + targetsURL, err := url.JoinPath(remoteURL, "targets") + if err != nil { + return nil, err + } + + return &UpdaterConfig{ + // TUF configuration + MaxRootRotations: 256, + MaxDelegations: 32, + RootMaxLength: 512000, // bytes + TimestampMaxLength: 16384, // bytes + SnapshotMaxLength: 2000000, // bytes + TargetsMaxLength: 5000000, // bytes + // Updater configuration + Fetcher: fetcher.NewDefaultFetcher(), // use the default built-in download fetcher + LocalTrustedRoot: rootBytes, // trusted root.json + RemoteMetadataURL: remoteURL, // URL of where the TUF metadata is + RemoteTargetsURL: targetsURL, // URL of where the target files should be downloaded from + DisableLocalCache: false, // enable local caching of trusted metadata + PrefixTargetsWithHash: true, // use hash-prefixed target files with consistent snapshots + UnsafeLocalMode: false, + }, nil +} + +func (cfg *UpdaterConfig) EnsurePathsExist() error { + if cfg.DisableLocalCache { + return nil + } + + for _, path := range []string{cfg.LocalMetadataDir, cfg.LocalTargetsDir} { + if err := os.MkdirAll(path, os.ModePerm); err != nil { + return err + } + } + + return nil +} + +func (cfg *UpdaterConfig) SetDefaultFetcherHTTPClient(client *http.Client) error { + // Check if the configured fetcher is the default fetcher + // since we are only configuring a http.Client value for the default fetcher + df, ok := cfg.Fetcher.(*fetcher.DefaultFetcher) + if !ok { + return fmt.Errorf("fetcher is not type fetcher.DefaultFetcher") + } + df.SetHTTPClient(client) + cfg.Fetcher = 
df + return nil +} + +func (cfg *UpdaterConfig) SetDefaultFetcherTransport(rt http.RoundTripper) error { + // Check if the configured fetcher is the default fetcher + // since we are only configuring a Transport value for the default fetcher + df, ok := cfg.Fetcher.(*fetcher.DefaultFetcher) + if !ok { + return fmt.Errorf("fetcher is not type fetcher.DefaultFetcher") + } + if err := df.SetTransport(rt); err != nil { + return err + } + cfg.Fetcher = df + return nil +} + +// SetDefaultFetcherRetry sets the constant retry interval and count for the default fetcher +func (cfg *UpdaterConfig) SetDefaultFetcherRetry(retryInterval time.Duration, retryCount uint) error { + // Check if the configured fetcher is the default fetcher + // since we are only configuring the retry interval and count for the default fetcher + df, ok := cfg.Fetcher.(*fetcher.DefaultFetcher) + if !ok { + return fmt.Errorf("fetcher is not type fetcher.DefaultFetcher") + } + df.SetRetry(retryInterval, retryCount) + cfg.Fetcher = df + return nil +} + +func (cfg *UpdaterConfig) SetRetryOptions(retryOptions ...backoff.RetryOption) error { + // Check if the configured fetcher is the default fetcher + // since we are only configuring retry options for the default fetcher + df, ok := cfg.Fetcher.(*fetcher.DefaultFetcher) + if !ok { + return fmt.Errorf("fetcher is not type fetcher.DefaultFetcher") + } + df.SetRetryOptions(retryOptions...) + cfg.Fetcher = df + return nil +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go new file mode 100644 index 00000000000..3bd8e5ea718 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go @@ -0,0 +1,246 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "fmt" +) + +// Define TUF error types used inside the new modern implementation. +// The names chosen for TUF error types should start in 'Err' except where +// there is a good reason not to, and provide that reason in those cases. + +// Repository errors + +// ErrRepository - an error with a repository's state, such as a missing file. 
+// It covers all exceptions that come from the repository side when +// looking from the perspective of users of metadata API or client +type ErrRepository struct { + Msg string +} + +func (e *ErrRepository) Error() string { + return fmt.Sprintf("repository error: %s", e.Msg) +} + +func (e *ErrRepository) Is(target error) bool { + _, ok := target.(*ErrRepository) + return ok +} + +// ErrUnsignedMetadata - An error about metadata object with insufficient threshold of signatures +type ErrUnsignedMetadata struct { + Msg string +} + +func (e *ErrUnsignedMetadata) Error() string { + return fmt.Sprintf("unsigned metadata error: %s", e.Msg) +} + +// ErrUnsignedMetadata is a subset of ErrRepository +func (e *ErrUnsignedMetadata) Is(target error) bool { + if _, ok := target.(*ErrUnsignedMetadata); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrBadVersionNumber - An error for metadata that contains an invalid version number +type ErrBadVersionNumber struct { + Msg string +} + +func (e *ErrBadVersionNumber) Error() string { + return fmt.Sprintf("bad version number error: %s", e.Msg) +} + +// ErrBadVersionNumber is a subset of ErrRepository +func (e *ErrBadVersionNumber) Is(target error) bool { + if _, ok := target.(*ErrBadVersionNumber); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrEqualVersionNumber - An error for metadata containing a previously verified version number +type ErrEqualVersionNumber struct { + Msg string +} + +func (e *ErrEqualVersionNumber) Error() string { + return fmt.Sprintf("equal version number error: %s", e.Msg) +} + +// ErrEqualVersionNumber is a subset of both ErrRepository and ErrBadVersionNumber +func (e *ErrEqualVersionNumber) Is(target error) bool { + if _, ok := target.(*ErrEqualVersionNumber); ok { + return true + } + if _, ok := target.(*ErrBadVersionNumber); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrExpiredMetadata - Indicate that a TUF Metadata file has expired +type ErrExpiredMetadata struct { + Msg string +} + +func (e *ErrExpiredMetadata) Error() string { + return fmt.Sprintf("expired metadata error: %s", e.Msg) +} + +// ErrExpiredMetadata is a subset of ErrRepository +func (e *ErrExpiredMetadata) Is(target error) bool { + if _, ok := target.(*ErrExpiredMetadata); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrLengthOrHashMismatch - An error while checking the length and hash values of an object +type ErrLengthOrHashMismatch struct { + Msg string +} + +func (e *ErrLengthOrHashMismatch) Error() string { + return fmt.Sprintf("length/hash verification error: %s", e.Msg) +} + +// ErrLengthOrHashMismatch is a subset of ErrRepository +func (e *ErrLengthOrHashMismatch) Is(target error) bool { + if _, ok := target.(*ErrLengthOrHashMismatch); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// Download errors + +// ErrDownload - An error occurred while attempting to download a file +type ErrDownload struct { + Msg string +} + +func (e *ErrDownload) Error() string { + return fmt.Sprintf("download error: %s", e.Msg) +} + +func (e *ErrDownload) Is(target error) bool { + _, ok := target.(*ErrDownload) + return ok +} + +// ErrDownloadLengthMismatch - Indicate that a mismatch of lengths was seen while downloading a file +type ErrDownloadLengthMismatch struct { 
+ Msg string +} + +func (e *ErrDownloadLengthMismatch) Error() string { + return fmt.Sprintf("download length mismatch error: %s", e.Msg) +} + +// ErrDownloadLengthMismatch is a subset of ErrDownload +func (e *ErrDownloadLengthMismatch) Is(target error) bool { + if _, ok := target.(*ErrDownloadLengthMismatch); ok { + return true + } + if _, ok := target.(*ErrDownload); ok { + return true + } + return false +} + +// ErrDownloadHTTP - Returned by Fetcher interface implementations for HTTP errors +type ErrDownloadHTTP struct { + StatusCode int + URL string +} + +func (e *ErrDownloadHTTP) Error() string { + return fmt.Sprintf("failed to download %s, http status code: %d", e.URL, e.StatusCode) +} + +// ErrDownloadHTTP is a subset of ErrDownload +func (e *ErrDownloadHTTP) Is(target error) bool { + if _, ok := target.(*ErrDownloadHTTP); ok { + return true + } + if _, ok := target.(*ErrDownload); ok { + return true + } + return false +} + +// ValueError +type ErrValue struct { + Msg string +} + +func (e *ErrValue) Error() string { + return fmt.Sprintf("value error: %s", e.Msg) +} + +func (e *ErrValue) Is(err error) bool { + _, ok := err.(*ErrValue) + return ok +} + +// TypeError +type ErrType struct { + Msg string +} + +func (e *ErrType) Error() string { + return fmt.Sprintf("type error: %s", e.Msg) +} + +func (e *ErrType) Is(err error) bool { + _, ok := err.(*ErrType) + return ok +} + +// RuntimeError +type ErrRuntime struct { + Msg string +} + +func (e *ErrRuntime) Error() string { + return fmt.Sprintf("runtime error: %s", e.Msg) +} + +func (e *ErrRuntime) Is(err error) bool { + _, ok := err.(*ErrRuntime) + return ok +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go new file mode 100644 index 00000000000..71796b7891d --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go @@ -0,0 +1,172 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package fetcher + +import ( + "context" + "fmt" + "io" + "net/http" + "strconv" + "time" + + "github.com/cenkalti/backoff/v5" + "github.com/theupdateframework/go-tuf/v2/metadata" +) + +// httpClient interface allows us to either provide a live http.Client +// or a mock implementation for testing purposes +type httpClient interface { + Do(req *http.Request) (*http.Response, error) +} + +// Fetcher interface +type Fetcher interface { + // DownloadFile downloads a file from the provided URL, reading + // up to maxLength of bytes before it aborts. + // The timeout argument is deprecated and not used. To configure + // the timeout (or retries), modify the fetcher instead. For the + // DefaultFetcher the underlying HTTP client can be substituted. 
+ DownloadFile(urlPath string, maxLength int64, _ time.Duration) ([]byte, error) +} + +// DefaultFetcher implements Fetcher +type DefaultFetcher struct { + // httpClient configuration + httpUserAgent string + client httpClient + // retry logic configuration + retryOptions []backoff.RetryOption +} + +func (d *DefaultFetcher) SetHTTPUserAgent(httpUserAgent string) { + d.httpUserAgent = httpUserAgent +} + +// DownloadFile downloads a file from urlPath, erroring out if the download +// fails or its length is larger than maxLength; the timeout argument is ignored. +func (d *DefaultFetcher) DownloadFile(urlPath string, maxLength int64, _ time.Duration) ([]byte, error) { + req, err := http.NewRequest("GET", urlPath, nil) + if err != nil { + return nil, err + } + // Use in case of multiple sessions. + if d.httpUserAgent != "" { + req.Header.Set("User-Agent", d.httpUserAgent) + } + + // For backwards compatibility, if the client is nil, use the default client. + if d.client == nil { + d.client = http.DefaultClient + } + + operation := func() ([]byte, error) { + // Execute the request. + res, err := d.client.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + // Handle HTTP status codes. + if res.StatusCode != http.StatusOK { + return nil, &metadata.ErrDownloadHTTP{StatusCode: res.StatusCode, URL: urlPath} + } + var length int64 + // Get content length from header (might not be accurate, -1 or not set). + if header := res.Header.Get("Content-Length"); header != "" { + length, err = strconv.ParseInt(header, 10, 0) + if err != nil { + return nil, err + } + // Error if the reported size is greater than what is expected. + if length > maxLength { + return nil, &metadata.ErrDownloadLengthMismatch{Msg: fmt.Sprintf("download failed for %s, length %d is larger than expected %d", urlPath, length, maxLength)} + } + } + // Although the size has been checked above, use a LimitReader in case + // the reported size is inaccurate, or size is -1 which indicates an + // unknown length. We read maxLength + 1 in order to check if the read data + // surpassed our set limit. + data, err := io.ReadAll(io.LimitReader(res.Body, maxLength+1)) + if err != nil { + return nil, err + } + // Error if the reported size is greater than what is expected. + length = int64(len(data)) + if length > maxLength { + return nil, &metadata.ErrDownloadLengthMismatch{Msg: fmt.Sprintf("download failed for %s, length %d is larger than expected %d", urlPath, length, maxLength)} + } + + return data, nil + } + data, err := backoff.Retry(context.Background(), operation, d.retryOptions...)
+ if err != nil { + return nil, err + } + + return data, nil +} + +func NewDefaultFetcher() *DefaultFetcher { + return &DefaultFetcher{ + client: http.DefaultClient, + // default to attempting the HTTP request once + retryOptions: []backoff.RetryOption{backoff.WithMaxTries(1)}, + } +} + +// NewFetcherWithHTTPClient creates a new DefaultFetcher with a custom httpClient +func (f *DefaultFetcher) NewFetcherWithHTTPClient(hc httpClient) *DefaultFetcher { + return &DefaultFetcher{ + client: hc, + } +} + +// NewFetcherWithRoundTripper creates a new DefaultFetcher with a custom RoundTripper +// The function will create a default http.Client and replace the transport with the provided RoundTripper implementation +func (f *DefaultFetcher) NewFetcherWithRoundTripper(rt http.RoundTripper) *DefaultFetcher { + client := http.DefaultClient + client.Transport = rt + return &DefaultFetcher{ + client: client, + } +} + +func (f *DefaultFetcher) SetHTTPClient(hc httpClient) { + f.client = hc +} + +func (f *DefaultFetcher) SetTransport(rt http.RoundTripper) error { + hc, ok := f.client.(*http.Client) + if !ok { + return fmt.Errorf("client is not type *http.Client") + } + hc.Transport = rt + f.client = hc + return nil +} + +func (f *DefaultFetcher) SetRetry(retryInterval time.Duration, retryCount uint) { + constantBackOff := backoff.WithBackOff(backoff.NewConstantBackOff(retryInterval)) + maxTryCount := backoff.WithMaxTries(retryCount) + f.SetRetryOptions(constantBackOff, maxTryCount) +} + +func (f *DefaultFetcher) SetRetryOptions(retryOptions ...backoff.RetryOption) { + f.retryOptions = retryOptions +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go new file mode 100644 index 00000000000..57e38612be1 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go @@ -0,0 +1,133 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/hex" + "fmt" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +const ( + KeyTypeEd25519 = "ed25519" + KeyTypeECDSA_SHA2_P256_COMPAT = "ecdsa-sha2-nistp256" + KeyTypeECDSA_SHA2_P256 = "ecdsa" + KeyTypeRSASSA_PSS_SHA256 = "rsa" + KeySchemeEd25519 = "ed25519" + KeySchemeECDSA_SHA2_P256 = "ecdsa-sha2-nistp256" + KeySchemeECDSA_SHA2_P384 = "ecdsa-sha2-nistp384" + KeySchemeRSASSA_PSS_SHA256 = "rsassa-pss-sha256" +) + +// ToPublicKey generates a crypto.PublicKey from metadata type Key +func (k *Key) ToPublicKey() (crypto.PublicKey, error) { + switch k.Type { + case KeyTypeRSASSA_PSS_SHA256: + publicKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(k.Value.PublicKey)) + if err != nil { + return nil, err + } + rsaKey, ok := publicKey.(*rsa.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid rsa public key") + } + // done for verification - ref. https://github.com/theupdateframework/go-tuf/pull/357 + if _, err := x509.MarshalPKIXPublicKey(rsaKey); err != nil { + return nil, err + } + return rsaKey, nil + case KeyTypeECDSA_SHA2_P256, KeyTypeECDSA_SHA2_P256_COMPAT: // handle "ecdsa" too as python-tuf/sslib keys are using it for keytype instead of https://theupdateframework.github.io/specification/latest/index.html#keytype-ecdsa-sha2-nistp256 + publicKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(k.Value.PublicKey)) + if err != nil { + return nil, err + } + ecdsaKey, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid ecdsa public key") + } + // done for verification - ref. https://github.com/theupdateframework/go-tuf/pull/357 + if _, err := x509.MarshalPKIXPublicKey(ecdsaKey); err != nil { + return nil, err + } + return ecdsaKey, nil + case KeyTypeEd25519: + publicKey, err := hex.DecodeString(k.Value.PublicKey) + if err != nil { + return nil, err + } + ed25519Key := ed25519.PublicKey(publicKey) + // done for verification - ref.
https://github.com/theupdateframework/go-tuf/pull/357 + if _, err := x509.MarshalPKIXPublicKey(ed25519Key); err != nil { + return nil, err + } + return ed25519Key, nil + } + return nil, fmt.Errorf("unsupported public key type") +} + +// KeyFromPublicKey generates a metadata type Key from crypto.PublicKey +func KeyFromPublicKey(k crypto.PublicKey) (*Key, error) { + key := &Key{} + switch k := k.(type) { + case *rsa.PublicKey: + key.Type = KeyTypeRSASSA_PSS_SHA256 + key.Scheme = KeySchemeRSASSA_PSS_SHA256 + pemKey, err := cryptoutils.MarshalPublicKeyToPEM(k) + if err != nil { + return nil, err + } + key.Value.PublicKey = string(pemKey) + case *ecdsa.PublicKey: + key.Type = KeyTypeECDSA_SHA2_P256 + key.Scheme = KeySchemeECDSA_SHA2_P256 + pemKey, err := cryptoutils.MarshalPublicKeyToPEM(k) + if err != nil { + return nil, err + } + key.Value.PublicKey = string(pemKey) + case ed25519.PublicKey: + key.Type = KeyTypeEd25519 + key.Scheme = KeySchemeEd25519 + key.Value.PublicKey = hex.EncodeToString(k) + default: + return nil, fmt.Errorf("unsupported public key type") + } + return key, nil +} + +// ID returns the keyID value for the given Key +func (k *Key) ID() string { + // the identifier is a hexdigest of the SHA-256 hash of the canonical form of the key + if k.id == "" { + data, err := cjson.EncodeCanonical(k) + if err != nil { + panic(fmt.Errorf("error creating key ID: %w", err)) + } + digest := sha256.Sum256(data) + k.id = hex.EncodeToString(digest[:]) + } + return k.id +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go new file mode 100644 index 00000000000..c7cab38389b --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go @@ -0,0 +1,45 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +var log Logger = DiscardLogger{} + +// Logger partially implements the go-logr/logr interface: +// https://github.com/go-logr/logr/blob/master/logr.go +type Logger interface { + // Info logs a non-error message with key/value pairs + Info(msg string, kv ...any) + // Error logs an error with a given message and key/value pairs.
+ Error(err error, msg string, kv ...any) +} + +type DiscardLogger struct{} + +func (d DiscardLogger) Info(msg string, kv ...any) { +} + +func (d DiscardLogger) Error(err error, msg string, kv ...any) { +} + +func SetLogger(logger Logger) { + log = logger +} + +func GetLogger() Logger { + return log +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go new file mode 100644 index 00000000000..bd3f1e44bdc --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go @@ -0,0 +1,567 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "encoding/hex" + "encoding/json" + "errors" +) + +// The following marshal/unmarshal methods override the default behavior for each TUF type +// in order to support unrecognized fields + +func (signed RootType) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["_type"] = signed.Type + dict["spec_version"] = signed.SpecVersion + dict["consistent_snapshot"] = signed.ConsistentSnapshot + dict["version"] = signed.Version + dict["expires"] = signed.Expires + dict["keys"] = signed.Keys + dict["roles"] = signed.Roles + return json.Marshal(dict) +} + +func (signed *RootType) UnmarshalJSON(data []byte) error { + type Alias RootType + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = RootType(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "_type") + delete(dict, "spec_version") + delete(dict, "consistent_snapshot") + delete(dict, "version") + delete(dict, "expires") + delete(dict, "keys") + delete(dict, "roles") + signed.UnrecognizedFields = dict + return nil +} + +func (signed SnapshotType) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["_type"] = signed.Type + dict["spec_version"] = signed.SpecVersion + dict["version"] = signed.Version + dict["expires"] = signed.Expires + dict["meta"] = signed.Meta + return json.Marshal(dict) +} + +func (signed *SnapshotType) UnmarshalJSON(data []byte) error { + type Alias SnapshotType + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = SnapshotType(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "_type") + delete(dict, "spec_version") + delete(dict, "version") + delete(dict, "expires") + delete(dict, "meta") + signed.UnrecognizedFields = dict + return nil +} + +func (signed TimestampType) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) +
} + dict["_type"] = signed.Type + dict["spec_version"] = signed.SpecVersion + dict["version"] = signed.Version + dict["expires"] = signed.Expires + dict["meta"] = signed.Meta + return json.Marshal(dict) +} + +func (signed *TimestampType) UnmarshalJSON(data []byte) error { + type Alias TimestampType + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = TimestampType(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "_type") + delete(dict, "spec_version") + delete(dict, "version") + delete(dict, "expires") + delete(dict, "meta") + signed.UnrecognizedFields = dict + return nil +} + +func (signed TargetsType) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["_type"] = signed.Type + dict["spec_version"] = signed.SpecVersion + dict["version"] = signed.Version + dict["expires"] = signed.Expires + dict["targets"] = signed.Targets + if signed.Delegations != nil { + dict["delegations"] = signed.Delegations + } + return json.Marshal(dict) +} + +func (signed *TargetsType) UnmarshalJSON(data []byte) error { + type Alias TargetsType + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = TargetsType(s) + + // populate the path field for each target + for name, targetFile := range signed.Targets { + targetFile.Path = name + } + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "_type") + delete(dict, "spec_version") + delete(dict, "version") + delete(dict, "expires") + delete(dict, "targets") + delete(dict, "delegations") + signed.UnrecognizedFields = dict + return nil +} + +func (signed MetaFiles) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + // length and hashes are optional + if signed.Length != 0 { + dict["length"] = signed.Length + } + if len(signed.Hashes) != 0 { + dict["hashes"] = signed.Hashes + } + dict["version"] = signed.Version + return json.Marshal(dict) +} + +func (signed *MetaFiles) UnmarshalJSON(data []byte) error { + type Alias MetaFiles + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = MetaFiles(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "length") + delete(dict, "hashes") + delete(dict, "version") + signed.UnrecognizedFields = dict + return nil +} + +func (signed TargetFiles) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["length"] = signed.Length + dict["hashes"] = signed.Hashes + if signed.Custom != nil { + dict["custom"] = signed.Custom + } + return json.Marshal(dict) +} + +func (signed *TargetFiles) UnmarshalJSON(data []byte) error { + type Alias TargetFiles + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = TargetFiles(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "length") + delete(dict, "hashes") + delete(dict, "custom") + signed.UnrecognizedFields = dict + return nil +} + +func (key Key) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(key.UnrecognizedFields) != 0 { + 
copyMapValues(key.UnrecognizedFields, dict) + } + dict["keytype"] = key.Type + dict["scheme"] = key.Scheme + dict["keyval"] = key.Value + return json.Marshal(dict) +} + +func (key *Key) UnmarshalJSON(data []byte) error { + type Alias Key + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + // nolint + *key = Key(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keytype") + delete(dict, "scheme") + delete(dict, "keyval") + key.UnrecognizedFields = dict + return nil +} + +func (meta Metadata[T]) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(meta.UnrecognizedFields) != 0 { + copyMapValues(meta.UnrecognizedFields, dict) + } + dict["signed"] = meta.Signed + dict["signatures"] = meta.Signatures + return json.Marshal(dict) +} + +func (meta *Metadata[T]) UnmarshalJSON(data []byte) error { + tmp := any(new(T)) + var m map[string]any + if err := json.Unmarshal(data, &m); err != nil { + return err + } + switch tmp.(type) { + case *RootType: + dict := struct { + Signed RootType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + case *SnapshotType: + dict := struct { + Signed SnapshotType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + case *TimestampType: + dict := struct { + Signed TimestampType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + case *TargetsType: + dict := struct { + Signed TargetsType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + default: + return &ErrValue{Msg: "unrecognized metadata type"} + } + delete(m, "signed") + delete(m, "signatures") + meta.UnrecognizedFields = m + return nil +} + +func (s Signature) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(s.UnrecognizedFields) != 0 { + copyMapValues(s.UnrecognizedFields, dict) + } + dict["keyid"] = s.KeyID + dict["sig"] = s.Signature + return json.Marshal(dict) +} + +func (s *Signature) UnmarshalJSON(data []byte) error { + type Alias Signature + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *s = Signature(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keyid") + delete(dict, "sig") + s.UnrecognizedFields = dict + return nil +} + +func (kv KeyVal) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(kv.UnrecognizedFields) != 0 { + copyMapValues(kv.UnrecognizedFields, dict) + } + dict["public"] = kv.PublicKey + return json.Marshal(dict) +} + +func (kv *KeyVal) UnmarshalJSON(data []byte) error { + type Alias KeyVal + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *kv = KeyVal(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "public") + kv.UnrecognizedFields 
= dict + return nil +} + +func (role Role) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(role.UnrecognizedFields) != 0 { + copyMapValues(role.UnrecognizedFields, dict) + } + dict["keyids"] = role.KeyIDs + dict["threshold"] = role.Threshold + return json.Marshal(dict) +} + +func (role *Role) UnmarshalJSON(data []byte) error { + type Alias Role + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *role = Role(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keyids") + delete(dict, "threshold") + role.UnrecognizedFields = dict + return nil +} + +func (d Delegations) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(d.UnrecognizedFields) != 0 { + copyMapValues(d.UnrecognizedFields, dict) + } + // only one is allowed + dict["keys"] = d.Keys + if d.Roles != nil { + dict["roles"] = d.Roles + } else if d.SuccinctRoles != nil { + dict["succinct_roles"] = d.SuccinctRoles + } + return json.Marshal(dict) +} + +func (d *Delegations) UnmarshalJSON(data []byte) error { + type Alias Delegations + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *d = Delegations(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keys") + delete(dict, "roles") + delete(dict, "succinct_roles") + d.UnrecognizedFields = dict + return nil +} + +func (role DelegatedRole) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(role.UnrecognizedFields) != 0 { + copyMapValues(role.UnrecognizedFields, dict) + } + dict["name"] = role.Name + dict["keyids"] = role.KeyIDs + dict["threshold"] = role.Threshold + dict["terminating"] = role.Terminating + // make sure we have only one of the two (per spec) + if role.Paths != nil && role.PathHashPrefixes != nil { + return nil, &ErrValue{Msg: "failed to marshal: not allowed to have both \"paths\" and \"path_hash_prefixes\" present"} + } + if role.Paths != nil { + dict["paths"] = role.Paths + } else if role.PathHashPrefixes != nil { + dict["path_hash_prefixes"] = role.PathHashPrefixes + } + return json.Marshal(dict) +} + +func (role *DelegatedRole) UnmarshalJSON(data []byte) error { + type Alias DelegatedRole + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *role = DelegatedRole(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "name") + delete(dict, "keyids") + delete(dict, "threshold") + delete(dict, "terminating") + delete(dict, "paths") + delete(dict, "path_hash_prefixes") + role.UnrecognizedFields = dict + return nil +} + +func (role SuccinctRoles) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(role.UnrecognizedFields) != 0 { + copyMapValues(role.UnrecognizedFields, dict) + } + dict["keyids"] = role.KeyIDs + dict["threshold"] = role.Threshold + dict["bit_length"] = role.BitLength + dict["name_prefix"] = role.NamePrefix + return json.Marshal(dict) +} + +func (role *SuccinctRoles) UnmarshalJSON(data []byte) error { + type Alias SuccinctRoles + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *role = SuccinctRoles(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keyids") + delete(dict, "threshold") + delete(dict, "bit_length") + delete(dict, "name_prefix") + role.UnrecognizedFields = dict + return nil +} + +func (b 
*HexBytes) UnmarshalJSON(data []byte) error { + if len(data) < 2 || len(data)%2 != 0 || data[0] != '"' || data[len(data)-1] != '"' { + return errors.New("tuf: invalid JSON hex bytes") + } + res := make([]byte, hex.DecodedLen(len(data)-2)) + _, err := hex.Decode(res, data[1:len(data)-1]) + if err != nil { + return err + } + *b = res + return nil +} + +func (b HexBytes) MarshalJSON() ([]byte, error) { + res := make([]byte, hex.EncodedLen(len(b))+2) + res[0] = '"' + res[len(res)-1] = '"' + hex.Encode(res[1:], b) + return res, nil +} + +func (b HexBytes) String() string { + return hex.EncodeToString(b) +} + +// copyMapValues copies the values of the src map to dst +func copyMapValues(src, dst map[string]any) { + for k, v := range src { + dst[k] = v + } +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go new file mode 100644 index 00000000000..0d0afd850ee --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go @@ -0,0 +1,926 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "bytes" + "crypto" + "crypto/hmac" + "crypto/rsa" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "io" + "math" + "os" + "path/filepath" + "slices" + "strconv" + "strings" + "time" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" + "github.com/sigstore/sigstore/pkg/signature" +) + +// Root returns a new metadata instance of type Root +func Root(expires ...time.Time) *Metadata[RootType] { + // expire now if there's nothing set + if len(expires) == 0 { + expires = []time.Time{time.Now().UTC()} + } + // populate Roles + roles := map[string]*Role{} + for _, r := range []string{ROOT, SNAPSHOT, TARGETS, TIMESTAMP} { + roles[r] = &Role{ + KeyIDs: []string{}, + Threshold: 1, + } + } + log.Info("Created metadata", "type", ROOT) + return &Metadata[RootType]{ + Signed: RootType{ + Type: ROOT, + SpecVersion: SPECIFICATION_VERSION, + Version: 1, + Expires: expires[0], + Keys: map[string]*Key{}, + Roles: roles, + ConsistentSnapshot: true, + }, + Signatures: []Signature{}, + } +} + +// Snapshot returns a new metadata instance of type Snapshot +func Snapshot(expires ...time.Time) *Metadata[SnapshotType] { + // expire now if there's nothing set + if len(expires) == 0 { + expires = []time.Time{time.Now().UTC()} + } + log.Info("Created metadata", "type", SNAPSHOT) + return &Metadata[SnapshotType]{ + Signed: SnapshotType{ + Type: SNAPSHOT, + SpecVersion: SPECIFICATION_VERSION, + Version: 1, + Expires: expires[0], + Meta: map[string]*MetaFiles{ + "targets.json": { + Version: 1, + }, + }, + }, + Signatures: []Signature{}, + } +} + +// Timestamp returns a new metadata instance of type Timestamp +func Timestamp(expires ...time.Time) *Metadata[TimestampType] { + // expire now if there's nothing set
+ if len(expires) == 0 { + expires = []time.Time{time.Now().UTC()} + } + log.Info("Created metadata", "type", TIMESTAMP) + return &Metadata[TimestampType]{ + Signed: TimestampType{ + Type: TIMESTAMP, + SpecVersion: SPECIFICATION_VERSION, + Version: 1, + Expires: expires[0], + Meta: map[string]*MetaFiles{ + "snapshot.json": { + Version: 1, + }, + }, + }, + Signatures: []Signature{}, + } +} + +// Targets returns a new metadata instance of type Targets +func Targets(expires ...time.Time) *Metadata[TargetsType] { + // expire now if there's nothing set + if len(expires) == 0 { + expires = []time.Time{time.Now().UTC()} + } + log.Info("Created metadata", "type", TARGETS) + return &Metadata[TargetsType]{ + Signed: TargetsType{ + Type: TARGETS, + SpecVersion: SPECIFICATION_VERSION, + Version: 1, + Expires: expires[0], + Targets: map[string]*TargetFiles{}, + }, + Signatures: []Signature{}, + } +} + +// TargetFile returns a new metadata instance of type TargetFiles +func TargetFile() *TargetFiles { + return &TargetFiles{ + Length: 0, + Hashes: Hashes{}, + } +} + +// MetaFile returns a new metadata instance of type MetaFile +func MetaFile(version int64) *MetaFiles { + if version < 1 { + // attempting to set incorrect version + log.Info("Attempting to set incorrect version for MetaFile", "version", version) + version = 1 + } + return &MetaFiles{ + Length: 0, + Hashes: Hashes{}, + Version: version, + } +} + +// FromFile loads metadata from file +func (meta *Metadata[T]) FromFile(name string) (*Metadata[T], error) { + data, err := os.ReadFile(name) + if err != nil { + return nil, err + } + m, err := fromBytes[T](data) + if err != nil { + return nil, err + } + *meta = *m + log.Info("Loaded metadata from file", "name", name) + return meta, nil +} + +// FromBytes deserializes metadata from bytes +func (meta *Metadata[T]) FromBytes(data []byte) (*Metadata[T], error) { + m, err := fromBytes[T](data) + if err != nil { + return nil, err + } + *meta = *m + log.Info("Loaded metadata from bytes") + return meta, nil +} + +// ToBytes serializes metadata to bytes +func (meta *Metadata[T]) ToBytes(pretty bool) ([]byte, error) { + log.Info("Writing metadata to bytes") + if pretty { + return json.MarshalIndent(*meta, "", "\t") + } + return json.Marshal(*meta) +} + +// ToFile saves metadata to file +func (meta *Metadata[T]) ToFile(name string, pretty bool) error { + log.Info("Writing metadata to file", "name", name) + data, err := meta.ToBytes(pretty) + if err != nil { + return err + } + return os.WriteFile(name, data, 0644) +} + +// Sign creates a signature over Signed and assigns it to Signatures +func (meta *Metadata[T]) Sign(signer signature.Signer) (*Signature, error) { + // encode the Signed part to canonical JSON so signatures are consistent + payload, err := cjson.EncodeCanonical(meta.Signed) + if err != nil { + return nil, err + } + // sign the Signed part + sb, err := signer.SignMessage(bytes.NewReader(payload)) + if err != nil { + return nil, &ErrUnsignedMetadata{Msg: "problem signing metadata"} + } + // get the signer's PublicKey + publ, err := signer.PublicKey() + if err != nil { + return nil, err + } + // convert to TUF Key type to get keyID + key, err := KeyFromPublicKey(publ) + if err != nil { + return nil, err + } + // build signature + sig := &Signature{ + KeyID: key.ID(), + Signature: sb, + } + // update the Signatures part + meta.Signatures = append(meta.Signatures, *sig) + // return the new signature + log.Info("Signed metadata with key", "ID", key.ID()) + return sig, nil +} + +// VerifyDelegate verifies that
delegatedMetadata is signed with the required +// threshold of keys for the delegated role delegatedRole +func (meta *Metadata[T]) VerifyDelegate(delegatedRole string, delegatedMetadata any) error { + i := any(meta) + signingKeys := map[string]bool{} + var keys map[string]*Key + var roleKeyIDs []string + var roleThreshold int + + log.Info("Verifying", "role", delegatedRole) + + // collect keys, keyIDs and threshold based on delegator type + switch i := i.(type) { + // Root delegator + case *Metadata[RootType]: + keys = i.Signed.Keys + if role, ok := (*i).Signed.Roles[delegatedRole]; ok { + roleKeyIDs = role.KeyIDs + roleThreshold = role.Threshold + } else { + // the delegated role was not found, no need to proceed + return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)} + } + // Targets delegator + case *Metadata[TargetsType]: + if i.Signed.Delegations == nil { + return &ErrValue{Msg: "no delegations found"} + } + keys = i.Signed.Delegations.Keys + if i.Signed.Delegations.Roles != nil { + found := false + for _, v := range i.Signed.Delegations.Roles { + if v.Name == delegatedRole { + found = true + roleKeyIDs = v.KeyIDs + roleThreshold = v.Threshold + break + } + } + // the delegated role was not found, no need to proceed + if !found { + return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)} + } + } else if i.Signed.Delegations.SuccinctRoles != nil { + roleKeyIDs = i.Signed.Delegations.SuccinctRoles.KeyIDs + roleThreshold = i.Signed.Delegations.SuccinctRoles.Threshold + } + default: + return &ErrType{Msg: "call is valid only on delegator metadata (should be either root or targets)"} + } + // if there are no keyIDs for that role it means there's no delegation found + if len(roleKeyIDs) == 0 { + return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)} + } + // loop through each role keyID + for _, keyID := range roleKeyIDs { + key, ok := keys[keyID] + if !ok { + return &ErrValue{Msg: fmt.Sprintf("key with ID %s not found in %s keyids", keyID, delegatedRole)} + } + sign := Signature{} + var payload []byte + // convert to a PublicKey type + publicKey, err := key.ToPublicKey() + if err != nil { + return err + } + // use corresponding hash function for key type + hash := crypto.Hash(0) + if key.Type != KeyTypeEd25519 { + switch key.Scheme { + case KeySchemeECDSA_SHA2_P256: + hash = crypto.SHA256 + case KeySchemeECDSA_SHA2_P384: + hash = crypto.SHA384 + default: + hash = crypto.SHA256 + } + } + // load a verifier based on that key + // handle RSA PSS scheme separately as the LoadVerifier function doesn't identify it correctly + // Note we should support RSA PSS, not RSA PKCS1v15 (which is what LoadVerifier would return) + // Reference: https://theupdateframework.github.io/specification/latest/#file-formats-keys + var verifier signature.Verifier + if key.Type == KeyTypeRSASSA_PSS_SHA256 { + // Load a verifier for rsa + publicKeyRSAPSS, ok := publicKey.(*rsa.PublicKey) + if !ok { + return &ErrType{Msg: "failed to convert public key to RSA PSS key"} + } + verifier, err = signature.LoadRSAPSSVerifier(publicKeyRSAPSS, hash, &rsa.PSSOptions{Hash: crypto.SHA256}) + } else { + // Load a verifier for ed25519 and ecdsa + verifier, err = signature.LoadVerifier(publicKey, hash) + } + if err != nil { + return err + } + // collect the signature for that key and build the payload we'll verify + // based on the Signed part of the delegated metadata + switch d := delegatedMetadata.(type) { + case *Metadata[RootType]: + for _, signature := 
+			for _, signature := range d.Signatures {
+				if signature.KeyID == keyID {
+					sign = signature
+				}
+			}
+			payload, err = cjson.EncodeCanonical(d.Signed)
+			if err != nil {
+				return err
+			}
+		case *Metadata[SnapshotType]:
+			for _, signature := range d.Signatures {
+				if signature.KeyID == keyID {
+					sign = signature
+				}
+			}
+			payload, err = cjson.EncodeCanonical(d.Signed)
+			if err != nil {
+				return err
+			}
+		case *Metadata[TimestampType]:
+			for _, signature := range d.Signatures {
+				if signature.KeyID == keyID {
+					sign = signature
+				}
+			}
+			payload, err = cjson.EncodeCanonical(d.Signed)
+			if err != nil {
+				return err
+			}
+		case *Metadata[TargetsType]:
+			for _, signature := range d.Signatures {
+				if signature.KeyID == keyID {
+					sign = signature
+				}
+			}
+			payload, err = cjson.EncodeCanonical(d.Signed)
+			if err != nil {
+				return err
+			}
+		default:
+			return &ErrType{Msg: "unknown delegated metadata type"}
+		}
+		// verify if the signature for that payload corresponds to the given key
+		if err := verifier.VerifySignature(bytes.NewReader(sign.Signature), bytes.NewReader(payload)); err != nil {
+			// failed to verify the metadata with that key ID
+			log.Info("Failed to verify", "role", delegatedRole, "keyID", keyID)
+		} else {
+			// save the verified keyID only if verification passed
+			signingKeys[keyID] = true
+			log.Info("Verified with key", "role", delegatedRole, "ID", keyID)
+		}
+	}
+	// check if the number of valid signatures is enough
+	if len(signingKeys) < roleThreshold {
+		log.Info("Verifying failed, not enough signatures", "role", delegatedRole, "got", len(signingKeys), "want", roleThreshold)
+		return &ErrUnsignedMetadata{Msg: fmt.Sprintf("Verifying %s failed, not enough signatures, got %d, want %d", delegatedRole, len(signingKeys), roleThreshold)}
+	}
+	log.Info("Verified successfully", "role", delegatedRole)
+	return nil
+}
+
+// IsExpired returns true if metadata is expired.
+// It checks if referenceTime is after Signed.Expires
+func (signed *RootType) IsExpired(referenceTime time.Time) bool {
+	return referenceTime.After(signed.Expires)
+}
+
+// IsExpired returns true if metadata is expired.
+// It checks if referenceTime is after Signed.Expires
+func (signed *SnapshotType) IsExpired(referenceTime time.Time) bool {
+	return referenceTime.After(signed.Expires)
+}
+
+// IsExpired returns true if metadata is expired.
+// It checks if referenceTime is after Signed.Expires
+func (signed *TimestampType) IsExpired(referenceTime time.Time) bool {
+	return referenceTime.After(signed.Expires)
+}
+
+// IsExpired returns true if metadata is expired.
+// It checks if referenceTime is after Signed.Expires
+func (signed *TargetsType) IsExpired(referenceTime time.Time) bool {
+	return referenceTime.After(signed.Expires)
+}
+
+// VerifyLengthHashes checks whether the MetaFiles data matches its corresponding
+// length and hashes
+func (f *MetaFiles) VerifyLengthHashes(data []byte) error {
+	// hashes and length are optional for MetaFiles
+	if len(f.Hashes) > 0 {
+		err := verifyHashes(data, f.Hashes)
+		if err != nil {
+			return err
+		}
+	}
+	if f.Length != 0 {
+		err := verifyLength(data, f.Length)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// VerifyLengthHashes checks whether the TargetFiles data matches its corresponding
+// length and hashes
+func (f *TargetFiles) VerifyLengthHashes(data []byte) error {
+	err := verifyHashes(data, f.Hashes)
+	if err != nil {
+		return err
+	}
+	err = verifyLength(data, f.Length)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Equal checks whether the source target file matches another
+func (source *TargetFiles) Equal(expected TargetFiles) bool {
+	return source.Length == expected.Length && source.Hashes.Equal(expected.Hashes)
+}
+
+// FromFile generates TargetFiles from a file
+func (t *TargetFiles) FromFile(localPath string, hashes ...string) (*TargetFiles, error) {
+	log.Info("Generating target file from file", "path", localPath)
+	// read file
+	data, err := os.ReadFile(localPath)
+	if err != nil {
+		return nil, err
+	}
+	return t.FromBytes(localPath, data, hashes...)
+}
+
+// FromBytes generates TargetFiles from bytes
+func (t *TargetFiles) FromBytes(localPath string, data []byte, hashes ...string) (*TargetFiles, error) {
+	log.Info("Generating target file from bytes", "path", localPath)
+	var hasher hash.Hash
+	targetFile := &TargetFiles{
+		Hashes: map[string]HexBytes{},
+	}
+	// use default hash algorithm if not set
+	if len(hashes) == 0 {
+		hashes = []string{"sha256"}
+	}
+	// calculate length
+	length, err := io.Copy(io.Discard, bytes.NewReader(data))
+	if err != nil {
+		return nil, err
+	}
+	targetFile.Length = length
+	for _, v := range hashes {
+		switch v {
+		case "sha256":
+			hasher = sha256.New()
+		case "sha512":
+			hasher = sha512.New()
+		default:
+			return nil, &ErrValue{Msg: fmt.Sprintf("failed generating TargetFile - unsupported hashing algorithm - %s", v)}
+		}
+		_, err := hasher.Write(data)
+		if err != nil {
+			return nil, err
+		}
+		targetFile.Hashes[v] = hasher.Sum(nil)
+	}
+	targetFile.Path = localPath
+	return targetFile, nil
+}
+
+// ClearSignatures clears Signatures
+func (meta *Metadata[T]) ClearSignatures() {
+	log.Info("Cleared signatures")
+	meta.Signatures = []Signature{}
+}
+
+// IsDelegatedPath determines whether the given "targetFilepath" is in one of
+// the paths that "DelegatedRole" is trusted to provide
+func (role *DelegatedRole) IsDelegatedPath(targetFilepath string) (bool, error) {
+	if len(role.Paths) > 0 {
+		// standard delegations
+		for _, pathPattern := range role.Paths {
+			// A delegated role path may be an explicit path or glob
+			// pattern (Unix shell-style wildcards).
+			if isTargetInPathPattern(targetFilepath, pathPattern) {
+				return true, nil
+			}
+		}
+	} else if len(role.PathHashPrefixes) > 0 {
+		// hash bin delegations - calculate the hash of the filepath to determine in which bin to find the target.
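+		// For example (hypothetical values): prefixes are compared against the
+		// hex digest of the target path, so a role listing the path_hash_prefix
+		// "8b" is trusted for every target whose sha256(path) digest starts
+		// with "8b".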
+		targetFilepathHash := sha256.Sum256([]byte(targetFilepath))
+		for _, pathHashPrefix := range role.PathHashPrefixes {
+			if strings.HasPrefix(hex.EncodeToString(targetFilepathHash[:]), pathHashPrefix) {
+				return true, nil
+			}
+		}
+	}
+	return false, nil
+}
+
+// Determine whether "targetpath" matches the "pathpattern".
+func isTargetInPathPattern(targetpath string, pathpattern string) bool {
+	// We need to make sure that targetpath and pathpattern are pointing to
+	// the same directory as fnmatch doesn't treat "/" as a special symbol.
+	targetParts := strings.Split(targetpath, "/")
+	patternParts := strings.Split(pathpattern, "/")
+	if len(targetParts) != len(patternParts) {
+		return false
+	}
+
+	// Every part in the pathpattern could include a glob pattern, that's why
+	// each of the target and pathpattern parts should match.
+	for i := 0; i < len(targetParts); i++ {
+		if ok, _ := filepath.Match(patternParts[i], targetParts[i]); !ok {
+			return false
+		}
+	}
+
+	return true
+}
+
+// GetRolesForTarget returns the names and terminating status of all
+// delegated roles that are responsible for targetFilepath
+// Note the result should be an ordered list, ref. https://github.com/theupdateframework/go-tuf/security/advisories/GHSA-4f8r-qqr9-fq8j
+func (role *Delegations) GetRolesForTarget(targetFilepath string) []RoleResult {
+	var res []RoleResult
+	// Standard delegations
+	if role.Roles != nil {
+		for _, r := range role.Roles {
+			ok, err := r.IsDelegatedPath(targetFilepath)
+			if err == nil && ok {
+				res = append(res, RoleResult{Name: r.Name, Terminating: r.Terminating})
+			}
+		}
+	} else if role.SuccinctRoles != nil {
+		// SuccinctRoles delegations
+		res = role.SuccinctRoles.GetRolesForTarget(targetFilepath)
+	}
+	// We preserve the same order as the actual roles list
+	return res
+}
+
+// GetRolesForTarget calculates the name of the delegated role responsible for "targetFilepath".
+// The target at path "targetFilepath" is assigned to a bin by casting
+// the left-most "BitLength" bits of the file path hash digest to
+// int, using it as a bin index between 0 and "2**BitLength-1".
+func (role *SuccinctRoles) GetRolesForTarget(targetFilepath string) []RoleResult {
+	// calculate the suffixLen value based on the total number of bins in
+	// hex. If bit_length = 10 then numberOfBins = 1024 or bin names will
+	// have a suffix between "000" and "3ff" in hex and suffixLen will be 3
+	// meaning the third bin will have a suffix of "003"
+	numberOfBins := math.Pow(2, float64(role.BitLength))
+	// suffixLen is calculated based on "numberOfBins - 1" as the name
+	// of the last bin contains the number "numberOfBins -1" as a suffix.
+	suffixLen := len(strconv.FormatInt(int64(numberOfBins-1), 16))
+
+	targetFilepathHash := sha256.Sum256([]byte(targetFilepath))
+	// we can't ever need more than 4 bytes (32 bits)
+	hashBytes := targetFilepathHash[:4]
+
+	// right shift hash bytes, so that we only have the leftmost
+	// bit_length bits that we care about
+	shiftValue := 32 - role.BitLength
+	binNumber := binary.BigEndian.Uint32(hashBytes) >> shiftValue
+	// add zero padding if necessary and cast to hex the suffix
+	suffix := fmt.Sprintf("%0*x", suffixLen, binNumber)
+	// we consider all succinct_roles as terminating.
+	// for more information, read TAP 15.
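+	// Worked example (assumed values): with BitLength = 8 there are 2^8 = 256
+	// bins and suffixLen = len("ff") = 2; shiftValue is 32 - 8 = 24, so
+	// binNumber is just the first byte of the digest, and a digest starting
+	// with 0xab maps to the role "<NamePrefix>-ab".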
+	return []RoleResult{{Name: fmt.Sprintf("%s-%s", role.NamePrefix, suffix), Terminating: true}}
+}
+
+// GetRoles returns the names of all different delegated roles
+func (role *SuccinctRoles) GetRoles() []string {
+	res := []string{}
+	suffixLen, numberOfBins := role.GetSuffixLen()
+
+	for binNumber := 0; binNumber < numberOfBins; binNumber++ {
+		suffix := fmt.Sprintf("%0*x", suffixLen, binNumber)
+		res = append(res, fmt.Sprintf("%s-%s", role.NamePrefix, suffix))
+	}
+	return res
+}
+
+func (role *SuccinctRoles) GetSuffixLen() (int, int) {
+	numberOfBins := int(math.Pow(2, float64(role.BitLength)))
+	return len(strconv.FormatInt(int64(numberOfBins-1), 16)), numberOfBins
+}
+
+// IsDelegatedRole returns whether the given roleName is in one of
+// the delegated roles that "SuccinctRoles" represents
+func (role *SuccinctRoles) IsDelegatedRole(roleName string) bool {
+	suffixLen, numberOfBins := role.GetSuffixLen()
+
+	expectedPrefix := fmt.Sprintf("%s-", role.NamePrefix)
+
+	// check if the roleName prefix is what we would expect
+	if !strings.HasPrefix(roleName, expectedPrefix) {
+		return false
+	}
+
+	// check if the roleName suffix length is what we would expect
+	suffix := roleName[len(expectedPrefix):]
+	if len(suffix) != suffixLen {
+		return false
+	}
+
+	// make sure suffix is hex value and get bin number
+	value, err := strconv.ParseInt(suffix, 16, 64)
+	if err != nil {
+		return false
+	}
+
+	// check if the bin we calculated is indeed within the range of what we support
+	return (value >= 0) && (value < int64(numberOfBins))
+}
+
+// AddKey adds a new signing key for role "role"
+// keyID: Identifier of the key to be added for "role".
+// key: Signing key to be added for "role".
+// role: Name of the role, for which "key" is added.
+func (signed *RootType) AddKey(key *Key, role string) error {
+	// verify role is present
+	if _, ok := signed.Roles[role]; !ok {
+		return &ErrValue{Msg: fmt.Sprintf("role %s doesn't exist", role)}
+	}
+	// add keyID to role
+	if !slices.Contains(signed.Roles[role].KeyIDs, key.ID()) {
+		signed.Roles[role].KeyIDs = append(signed.Roles[role].KeyIDs, key.ID())
+	}
+	// update Keys
+	signed.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
+	return nil
+}
+
+// RevokeKey revokes a key from "role" and updates the Keys store.
+// keyID: Identifier of the key to be removed for "role".
+// role: Name of the role, for which a signing key is removed.
+func (signed *RootType) RevokeKey(keyID, role string) error {
+	// verify role is present
+	if _, ok := signed.Roles[role]; !ok {
+		return &ErrValue{Msg: fmt.Sprintf("role %s doesn't exist", role)}
+	}
+	// verify keyID is present for given role
+	if !slices.Contains(signed.Roles[role].KeyIDs, keyID) {
+		return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by %s", keyID, role)}
+	}
+	// remove keyID from role
+	filteredKeyIDs := []string{}
+	for _, k := range signed.Roles[role].KeyIDs {
+		if k != keyID {
+			filteredKeyIDs = append(filteredKeyIDs, k)
+		}
+	}
+	// overwrite the old keyID slice
+	signed.Roles[role].KeyIDs = filteredKeyIDs
+	// check if keyID is used by other roles too
+	for _, r := range signed.Roles {
+		if slices.Contains(r.KeyIDs, keyID) {
+			return nil
+		}
+	}
+	// delete the keyID from Keys if it's not used anywhere else
+	delete(signed.Keys, keyID)
+	return nil
+}
+
+// AddKey adds a new signing key for delegated role "role"
+// key: Signing key to be added for "role".
+// role: Name of the role, for which "key" is added.
+// If SuccinctRoles is used then the "role" argument can be ignored.
+func (signed *TargetsType) AddKey(key *Key, role string) error {
+	// check if Delegations are even present
+	if signed.Delegations == nil {
+		return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
+	}
+	// standard delegated roles
+	if signed.Delegations.Roles != nil {
+		// loop through all delegated roles
+		isDelegatedRole := false
+		for i, d := range signed.Delegations.Roles {
+			// if role is found
+			if d.Name == role {
+				isDelegatedRole = true
+				// add key if keyID is not already part of keyIDs for that role
+				if !slices.Contains(d.KeyIDs, key.ID()) {
+					signed.Delegations.Roles[i].KeyIDs = append(signed.Delegations.Roles[i].KeyIDs, key.ID())
+					signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
+					return nil
+				}
+				log.Info("Delegated role already has keyID", "role", role, "ID", key.ID())
+			}
+		}
+		if !isDelegatedRole {
+			return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
+		}
+	} else if signed.Delegations.SuccinctRoles != nil {
+		// add key if keyID is not already part of keyIDs for the SuccinctRoles role
+		if !slices.Contains(signed.Delegations.SuccinctRoles.KeyIDs, key.ID()) {
+			signed.Delegations.SuccinctRoles.KeyIDs = append(signed.Delegations.SuccinctRoles.KeyIDs, key.ID())
+			signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
+			return nil
+		}
+		log.Info("SuccinctRoles role already has keyID", "ID", key.ID())
+
+	}
+	signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
+	return nil
+}
+
+// RevokeKey revokes a key from delegated role "role" and updates the delegations key store
+// keyID: Identifier of the key to be removed for "role".
+// role: Name of the role, for which a signing key is removed.
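+//
+// A minimal usage sketch (hypothetical role name; "targets" is a
+// *Metadata[TargetsType] and the key was added earlier via AddKey):
+//
+//	if err := targets.Signed.RevokeKey(key.ID(), "delegated-role"); err != nil {
+//		// the role does not exist or does not use this key
+//	}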
+func (signed *TargetsType) RevokeKey(keyID string, role string) error {
+	// check if Delegations are even present
+	if signed.Delegations == nil {
+		return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
+	}
+	// standard delegated roles
+	if signed.Delegations.Roles != nil {
+		// loop through all delegated roles
+		for i, d := range signed.Delegations.Roles {
+			// if role is found
+			if d.Name == role {
+				// check if keyID is present in keyIDs for that role
+				if !slices.Contains(d.KeyIDs, keyID) {
+					return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by %s", keyID, role)}
+				}
+				// remove keyID from role
+				filteredKeyIDs := []string{}
+				for _, k := range signed.Delegations.Roles[i].KeyIDs {
+					if k != keyID {
+						filteredKeyIDs = append(filteredKeyIDs, k)
+					}
+				}
+				// overwrite the old keyID slice for that role
+				signed.Delegations.Roles[i].KeyIDs = filteredKeyIDs
+				// check if keyID is used by other roles too
+				for _, r := range signed.Delegations.Roles {
+					if slices.Contains(r.KeyIDs, keyID) {
+						return nil
+					}
+				}
+				// delete the keyID from Keys if it's not used anywhere else
+				delete(signed.Delegations.Keys, keyID)
+				return nil
+			}
+		}
+		// we haven't found the delegated role
+		return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
+	} else if signed.Delegations.SuccinctRoles != nil {
+		// check if keyID is used by SuccinctRoles role
+		if !slices.Contains(signed.Delegations.SuccinctRoles.KeyIDs, keyID) {
+			return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by SuccinctRoles", keyID)}
+		}
+		// remove keyID from the SuccinctRoles role
+		filteredKeyIDs := []string{}
+		for _, k := range signed.Delegations.SuccinctRoles.KeyIDs {
+			if k != keyID {
+				filteredKeyIDs = append(filteredKeyIDs, k)
+			}
+		}
+		// overwrite the old keyID slice for SuccinctRoles role
+		signed.Delegations.SuccinctRoles.KeyIDs = filteredKeyIDs
+
+		// delete the keyID from Keys since it cannot be used anywhere else
+		delete(signed.Delegations.Keys, keyID)
+		return nil
+	}
+	return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
+}
+
+// Equal checks whether one hash set equals another
+func (source Hashes) Equal(expected Hashes) bool {
+	hashChecked := false
+	for typ, hash := range expected {
+		if h, ok := source[typ]; ok {
+			// hash type match found
+			hashChecked = true
+			if !hmac.Equal(h, hash) {
+				// hash values don't match
+				return false
+			}
+		}
+	}
+	return hashChecked
+}
+
+// verifyLength verifies that the passed data has the corresponding length
+func verifyLength(data []byte, length int64) error {
+	actualLength, err := io.Copy(io.Discard, bytes.NewReader(data))
+	if err != nil {
+		return err
+	}
+	if length != actualLength {
+		return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("length verification failed - expected %d, got %d", length, actualLength)}
+	}
+	return nil
+}
+
+// verifyHashes verifies that the hash of the passed data corresponds to it
+func verifyHashes(data []byte, hashes Hashes) error {
+	var hasher hash.Hash
+	for k, v := range hashes {
+		switch k {
+		case "sha256":
+			hasher = sha256.New()
+		case "sha512":
+			hasher = sha512.New()
+		default:
+			return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("hash verification failed - unknown hashing algorithm - %s", k)}
+		}
+		hasher.Write(data)
+		if hex.EncodeToString(v) != hex.EncodeToString(hasher.Sum(nil)) {
+			return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("hash verification failed - mismatch for algorithm %s", k)}
+		}
+	}
+	return nil
+}
+
+// fromBytes returns a *Metadata[T] object from bytes and verifies
+// that the data corresponds to the caller struct type
+func fromBytes[T Roles](data []byte) (*Metadata[T], error) {
+	meta := &Metadata[T]{}
+	// verify that the type we used to create the object is the same as the type of the metadata file
+	if err := checkType[T](data); err != nil {
+		return nil, err
+	}
+	// if all is okay, unmarshal meta to the desired Metadata[T] type
+	if err := json.Unmarshal(data, meta); err != nil {
+		return nil, err
+	}
+	// Make sure signature key IDs are unique
+	if err := checkUniqueSignatures(*meta); err != nil {
+		return nil, err
+	}
+	return meta, nil
+}
+
+// checkUniqueSignatures verifies that the signature key IDs are unique for that metadata
+func checkUniqueSignatures[T Roles](meta Metadata[T]) error {
+	signatures := []string{}
+	for _, sig := range meta.Signatures {
+		if slices.Contains(signatures, sig.KeyID) {
+			return &ErrValue{Msg: fmt.Sprintf("multiple signatures found for key ID %s", sig.KeyID)}
+		}
+		signatures = append(signatures, sig.KeyID)
+	}
+	return nil
+}
+
+// checkType verifies that the generic type used to create the object is the same as the type of the metadata file in bytes
+func checkType[T Roles](data []byte) error {
+	var m map[string]any
+	i := any(new(T))
+	if err := json.Unmarshal(data, &m); err != nil {
+		return err
+	}
+	// guard the type assertions so malformed input returns an error instead of panicking
+	signed, ok := m["signed"].(map[string]any)
+	if !ok {
+		return &ErrValue{Msg: "malformed metadata - missing or invalid signed portion"}
+	}
+	signedType, ok := signed["_type"].(string)
+	if !ok {
+		return &ErrValue{Msg: "malformed metadata - missing or invalid _type field"}
+	}
+	switch i.(type) {
+	case *RootType:
+		if ROOT != signedType {
+			return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", ROOT, signedType)}
+		}
+	case *SnapshotType:
+		if SNAPSHOT != signedType {
+			return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", SNAPSHOT, signedType)}
+		}
+	case *TimestampType:
+		if TIMESTAMP != signedType {
+			return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", TIMESTAMP, signedType)}
+		}
+	case *TargetsType:
+		if TARGETS != signedType {
+			return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", TARGETS, signedType)}
+		}
+	default:
+		return &ErrValue{Msg: fmt.Sprintf("unrecognized metadata type - %s", signedType)}
+	}
+	// all okay
+	return nil
+}
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go
new file mode 100644
index 00000000000..0726f31f881
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go
@@ -0,0 +1,357 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package trustedmetadata
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/theupdateframework/go-tuf/v2/metadata"
+)
+
+// TrustedMetadata struct for storing trusted metadata
+type TrustedMetadata struct {
+	Root      *metadata.Metadata[metadata.RootType]
+	Snapshot  *metadata.Metadata[metadata.SnapshotType]
+	Timestamp *metadata.Metadata[metadata.TimestampType]
+	Targets   map[string]*metadata.Metadata[metadata.TargetsType]
+	RefTime   time.Time
+}
+
+// New creates a new TrustedMetadata instance which ensures that the
+// collection of metadata in it is valid and trusted through the whole
+// client update workflow. It provides easy ways to update the metadata
+// with the caller making decisions on what is updated
+func New(rootData []byte) (*TrustedMetadata, error) {
+	res := &TrustedMetadata{
+		Targets: map[string]*metadata.Metadata[metadata.TargetsType]{},
+		RefTime: time.Now().UTC(),
+	}
+	// load and validate the local root metadata
+	// valid initial trusted root metadata is required
+	err := res.loadTrustedRoot(rootData)
+	if err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+// UpdateRoot verifies and loads "rootData" as new root metadata.
+// Note that an expired intermediate root is considered valid: expiry is
+// only checked for the final root in UpdateTimestamp()
+func (trusted *TrustedMetadata) UpdateRoot(rootData []byte) (*metadata.Metadata[metadata.RootType], error) {
+	log := metadata.GetLogger()
+
+	if trusted.Timestamp != nil {
+		return nil, &metadata.ErrRuntime{Msg: "cannot update root after timestamp"}
+	}
+	log.Info("Updating root")
+	// generate root metadata
+	newRoot, err := metadata.Root().FromBytes(rootData)
+	if err != nil {
+		return nil, err
+	}
+	// check metadata type matches root
+	if newRoot.Signed.Type != metadata.ROOT {
+		return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.ROOT, newRoot.Signed.Type)}
+	}
+	// verify that new root is signed by trusted root
+	err = trusted.Root.VerifyDelegate(metadata.ROOT, newRoot)
+	if err != nil {
+		return nil, err
+	}
+	// verify version
+	if newRoot.Signed.Version != trusted.Root.Signed.Version+1 {
+		return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("bad version number, expected %d, got %d", trusted.Root.Signed.Version+1, newRoot.Signed.Version)}
+	}
+	// verify that new root is signed by itself
+	err = newRoot.VerifyDelegate(metadata.ROOT, newRoot)
+	if err != nil {
+		return nil, err
+	}
+	// save root if verified
+	trusted.Root = newRoot
+	log.Info("Updated root", "version", trusted.Root.Signed.Version)
+	return trusted.Root, nil
+}
+
+// UpdateTimestamp verifies and loads "timestampData" as new timestamp metadata.
+// Note that an intermediate timestamp is allowed to be expired. "TrustedMetadata"
+// will error in this case but the intermediate timestamp will be loaded.
+// This way a newer timestamp can still be loaded (and the intermediate
+// timestamp will be used for rollback protection). An expired timestamp will
+// prevent loading snapshot metadata.
+func (trusted *TrustedMetadata) UpdateTimestamp(timestampData []byte) (*metadata.Metadata[metadata.TimestampType], error) {
+	log := metadata.GetLogger()
+
+	if trusted.Snapshot != nil {
+		return nil, &metadata.ErrRuntime{Msg: "cannot update timestamp after snapshot"}
+	}
+	// client workflow 5.3.10: Make sure final root is not expired.
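+	// (an intermediate root may have been expired while rotations were being
+	// applied; only this final, fully rotated root is held to the expiry check)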
+	if trusted.Root.Signed.IsExpired(trusted.RefTime) {
+		// no need to check for 5.3.11 (fast forward attack recovery):
+		// timestamp/snapshot cannot yet be loaded at this point
+		return nil, &metadata.ErrExpiredMetadata{Msg: "final root.json is expired"}
+	}
+	log.Info("Updating timestamp")
+	newTimestamp, err := metadata.Timestamp().FromBytes(timestampData)
+	if err != nil {
+		return nil, err
+	}
+	// check metadata type matches timestamp
+	if newTimestamp.Signed.Type != metadata.TIMESTAMP {
+		return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.TIMESTAMP, newTimestamp.Signed.Type)}
+	}
+	// verify that new timestamp is signed by trusted root
+	err = trusted.Root.VerifyDelegate(metadata.TIMESTAMP, newTimestamp)
+	if err != nil {
+		return nil, err
+	}
+	// if an existing trusted timestamp is updated,
+	// check for a rollback attack
+	if trusted.Timestamp != nil {
+		// prevent rolling back timestamp version
+		if newTimestamp.Signed.Version < trusted.Timestamp.Signed.Version {
+			return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("new timestamp version %d must be >= %d", newTimestamp.Signed.Version, trusted.Timestamp.Signed.Version)}
+		}
+		// keep using old timestamp if versions are equal
+		if newTimestamp.Signed.Version == trusted.Timestamp.Signed.Version {
+			log.Info("New timestamp version equals the old one", "new", newTimestamp.Signed.Version, "old", trusted.Timestamp.Signed.Version)
+			return nil, &metadata.ErrEqualVersionNumber{Msg: fmt.Sprintf("new timestamp version %d equals the old one %d", newTimestamp.Signed.Version, trusted.Timestamp.Signed.Version)}
+		}
+		// prevent rolling back snapshot version
+		snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+		newSnapshotMeta := newTimestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+		if newSnapshotMeta.Version < snapshotMeta.Version {
+			return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("new snapshot version %d must be >= %d", newSnapshotMeta.Version, snapshotMeta.Version)}
+		}
+	}
+	// expiry not checked to allow old timestamp to be used for rollback
+	// protection of new timestamp: expiry is checked in UpdateSnapshot()
+	// save timestamp if verified
+	trusted.Timestamp = newTimestamp
+	log.Info("Updated timestamp", "version", trusted.Timestamp.Signed.Version)
+
+	// timestamp is loaded: error if it is not valid _final_ timestamp
+	err = trusted.checkFinalTimestamp()
+	if err != nil {
+		return nil, err
+	}
+	// all okay
+	return trusted.Timestamp, nil
+}
+
+// checkFinalTimestamp verifies that the trusted timestamp is not expired
+func (trusted *TrustedMetadata) checkFinalTimestamp() error {
+	if trusted.Timestamp.Signed.IsExpired(trusted.RefTime) {
+		return &metadata.ErrExpiredMetadata{Msg: "timestamp.json is expired"}
+	}
+	return nil
+}
+
+// UpdateSnapshot verifies and loads "snapshotData" as new snapshot metadata.
+// Note that an intermediate snapshot is allowed to be expired and version
+// is allowed to not match timestamp meta version: TrustedMetadata
+// will error in the case of expired metadata or when using bad versions but the
+// intermediate snapshot will be loaded. This way a newer snapshot can still
+// be loaded (and the intermediate snapshot will be used for rollback protection).
+// An expired snapshot or a snapshot that does not match timestamp meta version will
+// prevent loading targets.
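+//
+// A typical update sequence over bytes fetched by the caller looks like
+// (sketch, error handling elided):
+//
+//	trusted, _ := trustedmetadata.New(rootBytes)
+//	_, _ = trusted.UpdateTimestamp(timestampBytes)
+//	_, _ = trusted.UpdateSnapshot(snapshotBytes, false)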
+func (trusted *TrustedMetadata) UpdateSnapshot(snapshotData []byte, isTrusted bool) (*metadata.Metadata[metadata.SnapshotType], error) {
+	log := metadata.GetLogger()
+
+	if trusted.Timestamp == nil {
+		return nil, &metadata.ErrRuntime{Msg: "cannot update snapshot before timestamp"}
+	}
+	if trusted.Targets[metadata.TARGETS] != nil {
+		return nil, &metadata.ErrRuntime{Msg: "cannot update snapshot after targets"}
+	}
+	log.Info("Updating snapshot")
+
+	// snapshot cannot be loaded if final timestamp is expired
+	err := trusted.checkFinalTimestamp()
+	if err != nil {
+		return nil, err
+	}
+	snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+	// verify non-trusted data against the hashes in timestamp, if any.
+	// trusted snapshot data has already been verified once.
+	if !isTrusted {
+		err = snapshotMeta.VerifyLengthHashes(snapshotData)
+		if err != nil {
+			return nil, err
+		}
+	}
+	newSnapshot, err := metadata.Snapshot().FromBytes(snapshotData)
+	if err != nil {
+		return nil, err
+	}
+	// check metadata type matches snapshot
+	if newSnapshot.Signed.Type != metadata.SNAPSHOT {
+		return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.SNAPSHOT, newSnapshot.Signed.Type)}
+	}
+	// verify that new snapshot is signed by trusted root
+	err = trusted.Root.VerifyDelegate(metadata.SNAPSHOT, newSnapshot)
+	if err != nil {
+		return nil, err
+	}
+
+	// version not checked against meta version to allow old snapshot to be
+	// used in rollback protection: it is checked when targets is updated
+
+	// if an existing trusted snapshot is updated, check for rollback attack
+	if trusted.Snapshot != nil {
+		for name, info := range trusted.Snapshot.Signed.Meta {
+			newFileInfo, ok := newSnapshot.Signed.Meta[name]
+			// prevent removal of any metadata in meta
+			if !ok {
+				return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("new snapshot is missing info for %s", name)}
+			}
+			// prevent rollback of any metadata versions
+			if newFileInfo.Version < info.Version {
+				return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %s version %d, got %d", name, info.Version, newFileInfo.Version)}
+			}
+		}
+	}
+
+	// expiry not checked to allow old snapshot to be used for rollback
+	// protection of new snapshot: it is checked when targets is updated
+	trusted.Snapshot = newSnapshot
+	log.Info("Updated snapshot", "version", trusted.Snapshot.Signed.Version)
+
+	// snapshot is loaded, but we error if it's not valid _final_ snapshot
+	err = trusted.checkFinalSnapshot()
+	if err != nil {
+		return nil, err
+	}
+	// all okay
+	return trusted.Snapshot, nil
+}
+
+// checkFinalSnapshot verifies that the snapshot is not expired and that its version matches timestamp meta version
+func (trusted *TrustedMetadata) checkFinalSnapshot() error {
+	if trusted.Snapshot.Signed.IsExpired(trusted.RefTime) {
+		return &metadata.ErrExpiredMetadata{Msg: "snapshot.json is expired"}
+	}
+	snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+	if trusted.Snapshot.Signed.Version != snapshotMeta.Version {
+		return &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %d, got %d", snapshotMeta.Version, trusted.Snapshot.Signed.Version)}
+	}
+	return nil
+}
+
+// UpdateTargets verifies and loads "targetsData" as new top-level targets metadata.
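+//
+// Top-level targets are handled as a delegation from root; a non-top-level
+// delegatee is loaded the same way via UpdateDelegatedTargets, e.g.
+// (hypothetical role name):
+//
+//	_, err := trusted.UpdateDelegatedTargets(data, "project-a", "targets")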
+func (trusted *TrustedMetadata) UpdateTargets(targetsData []byte) (*metadata.Metadata[metadata.TargetsType], error) {
+	return trusted.UpdateDelegatedTargets(targetsData, metadata.TARGETS, metadata.ROOT)
+}
+
+// UpdateDelegatedTargets verifies and loads "targetsData" as new metadata for target "roleName"
+func (trusted *TrustedMetadata) UpdateDelegatedTargets(targetsData []byte, roleName, delegatorName string) (*metadata.Metadata[metadata.TargetsType], error) {
+	log := metadata.GetLogger()
+
+	var ok bool
+	if trusted.Snapshot == nil {
+		return nil, &metadata.ErrRuntime{Msg: "cannot load targets before snapshot"}
+	}
+	// targets cannot be loaded if final snapshot is expired or its version
+	// does not match meta version in timestamp
+	err := trusted.checkFinalSnapshot()
+	if err != nil {
+		return nil, err
+	}
+	// check if delegator metadata is present
+	if delegatorName == metadata.ROOT {
+		ok = trusted.Root != nil
+	} else {
+		_, ok = trusted.Targets[delegatorName]
+	}
+	if !ok {
+		return nil, &metadata.ErrRuntime{Msg: "cannot load targets before delegator"}
+	}
+	log.Info("Updating delegated role", "role", roleName, "delegator", delegatorName)
+	// Verify against the hashes in snapshot, if any
+	meta, ok := trusted.Snapshot.Signed.Meta[fmt.Sprintf("%s.json", roleName)]
+	if !ok {
+		return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("snapshot does not contain information for %s", roleName)}
+	}
+	err = meta.VerifyLengthHashes(targetsData)
+	if err != nil {
+		return nil, err
+	}
+	newDelegate, err := metadata.Targets().FromBytes(targetsData)
+	if err != nil {
+		return nil, err
+	}
+	// check metadata type matches targets
+	if newDelegate.Signed.Type != metadata.TARGETS {
+		return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.TARGETS, newDelegate.Signed.Type)}
+	}
+	// get delegator metadata and verify the new delegatee
+	if delegatorName == metadata.ROOT {
+		err = trusted.Root.VerifyDelegate(roleName, newDelegate)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		err = trusted.Targets[delegatorName].VerifyDelegate(roleName, newDelegate)
+		if err != nil {
+			return nil, err
+		}
+	}
+	// check versions
+	if newDelegate.Signed.Version != meta.Version {
+		return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %s version %d, got %d", roleName, meta.Version, newDelegate.Signed.Version)}
+	}
+	// check expiration
+	if newDelegate.Signed.IsExpired(trusted.RefTime) {
+		return nil, &metadata.ErrExpiredMetadata{Msg: fmt.Sprintf("new %s is expired", roleName)}
+	}
+	trusted.Targets[roleName] = newDelegate
+	log.Info("Updated role", "role", roleName, "version", trusted.Targets[roleName].Signed.Version)
+	return trusted.Targets[roleName], nil
+}
+
+// loadTrustedRoot verifies and loads "data" as trusted root metadata.
+// Note that an expired initial root is considered valid: expiry is
+// only checked for the final root in UpdateTimestamp().
+func (trusted *TrustedMetadata) loadTrustedRoot(rootData []byte) error {
+	log := metadata.GetLogger()
+
+	// generate root metadata
+	newRoot, err := metadata.Root().FromBytes(rootData)
+	if err != nil {
+		return err
+	}
+	// check metadata type matches root
+	if newRoot.Signed.Type != metadata.ROOT {
+		return &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.ROOT, newRoot.Signed.Type)}
+	}
+	// verify root by itself
+	err = newRoot.VerifyDelegate(metadata.ROOT, newRoot)
+	if err != nil {
+		return err
+	}
+	// save root if verified
+	trusted.Root = newRoot
+	log.Info("Loaded trusted root", "version", trusted.Root.Signed.Version)
+	return nil
+}
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go
new file mode 100644
index 00000000000..0bd86ee8f23
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go
@@ -0,0 +1,179 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package metadata
+
+import (
+	"encoding/json"
+	"time"
+)
+
+// Generic type constraint
+type Roles interface {
+	RootType | SnapshotType | TimestampType | TargetsType
+}
+
+// Define version of the TUF specification
+const (
+	SPECIFICATION_VERSION = "1.0.31"
+)
+
+// Define top level role names
+const (
+	ROOT      = "root"
+	SNAPSHOT  = "snapshot"
+	TARGETS   = "targets"
+	TIMESTAMP = "timestamp"
+)
+
+var TOP_LEVEL_ROLE_NAMES = [...]string{ROOT, TIMESTAMP, SNAPSHOT, TARGETS}
+
+// Metadata[T Roles] represents a TUF metadata document.
+// It provides methods to read from and write to files and bytes,
+// and to create, verify and clear metadata signatures.
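+// On the wire this corresponds to the TUF envelope, e.g. (abridged):
+//
+//	{
+//	  "signed":     {"_type": "root", "spec_version": "1.0.31", ...},
+//	  "signatures": [{"keyid": "...", "sig": "..."}]
+//	}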
+type Metadata[T Roles] struct {
+	Signed             T              `json:"signed"`
+	Signatures         []Signature    `json:"signatures"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+// Signature represents the Signature part of a TUF metadata
+type Signature struct {
+	KeyID              string         `json:"keyid"`
+	Signature          HexBytes       `json:"sig"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+// RootType represents the Signed portion of a root metadata
+type RootType struct {
+	Type               string         `json:"_type"`
+	SpecVersion        string         `json:"spec_version"`
+	ConsistentSnapshot bool           `json:"consistent_snapshot"`
+	Version            int64          `json:"version"`
+	Expires            time.Time      `json:"expires"`
+	Keys               map[string]*Key `json:"keys"`
+	Roles              map[string]*Role `json:"roles"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+// SnapshotType represents the Signed portion of a snapshot metadata
+type SnapshotType struct {
+	Type               string         `json:"_type"`
+	SpecVersion        string         `json:"spec_version"`
+	Version            int64          `json:"version"`
+	Expires            time.Time      `json:"expires"`
+	Meta               map[string]*MetaFiles `json:"meta"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+// TargetsType represents the Signed portion of a targets metadata
+type TargetsType struct {
+	Type               string         `json:"_type"`
+	SpecVersion        string         `json:"spec_version"`
+	Version            int64          `json:"version"`
+	Expires            time.Time      `json:"expires"`
+	Targets            map[string]*TargetFiles `json:"targets"`
+	Delegations        *Delegations   `json:"delegations,omitempty"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+// TimestampType represents the Signed portion of a timestamp metadata
+type TimestampType struct {
+	Type               string         `json:"_type"`
+	SpecVersion        string         `json:"spec_version"`
+	Version            int64          `json:"version"`
+	Expires            time.Time      `json:"expires"`
+	Meta               map[string]*MetaFiles `json:"meta"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+// Key represents a key in TUF
+type Key struct {
+	Type               string         `json:"keytype"`
+	Scheme             string         `json:"scheme"`
+	Value              KeyVal         `json:"keyval"`
+	id                 string         `json:"-"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+type KeyVal struct {
+	PublicKey          string         `json:"public"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+// Role represents one of the top-level roles in TUF
+type Role struct {
+	KeyIDs             []string       `json:"keyids"`
+	Threshold          int            `json:"threshold"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+type HexBytes []byte
+
+type Hashes map[string]HexBytes
+
+// MetaFiles represents the value portion of METAFILES in TUF (used in Snapshot and Timestamp metadata). Used to store information about a particular meta file.
+type MetaFiles struct {
+	Length             int64          `json:"length,omitempty"`
+	Hashes             Hashes         `json:"hashes,omitempty"`
+	Version            int64          `json:"version"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+// TargetFiles represents the value portion of TARGETS in TUF (used in Targets metadata). Used to store information about a particular target file.
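+// A typical serialized entry looks like (values abridged):
+//
+//	"files/app.tar.gz": {"length": 1048576, "hashes": {"sha256": "..."}}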
+type TargetFiles struct {
+	Length             int64            `json:"length"`
+	Hashes             Hashes           `json:"hashes"`
+	Custom             *json.RawMessage `json:"custom,omitempty"`
+	Path               string           `json:"-"`
+	UnrecognizedFields map[string]any   `json:"-"`
+}
+
+// Delegations is an optional object which represents delegation roles and their corresponding keys
+type Delegations struct {
+	Keys               map[string]*Key `json:"keys"`
+	Roles              []DelegatedRole `json:"roles,omitempty"`
+	SuccinctRoles      *SuccinctRoles  `json:"succinct_roles,omitempty"`
+	UnrecognizedFields map[string]any  `json:"-"`
+}
+
+// DelegatedRole represents a delegated role in TUF
+type DelegatedRole struct {
+	Name               string         `json:"name"`
+	KeyIDs             []string       `json:"keyids"`
+	Threshold          int            `json:"threshold"`
+	Terminating        bool           `json:"terminating"`
+	PathHashPrefixes   []string       `json:"path_hash_prefixes,omitempty"`
+	Paths              []string       `json:"paths,omitempty"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+// SuccinctRoles represents a delegation graph that covers all targets,
+// distributing them uniformly over the delegated roles (i.e. bins) in the graph.
+type SuccinctRoles struct {
+	KeyIDs             []string       `json:"keyids"`
+	Threshold          int            `json:"threshold"`
+	BitLength          int            `json:"bit_length"`
+	NamePrefix         string         `json:"name_prefix"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+// RoleResult represents the name and terminating status of a delegated role that is responsible for targetFilepath
+type RoleResult struct {
+	Name        string
+	Terminating bool
+}
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go
new file mode 100644
index 00000000000..bd533b63c3d
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go
@@ -0,0 +1,704 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package updater
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"regexp"
+	"slices"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/theupdateframework/go-tuf/v2/metadata"
+	"github.com/theupdateframework/go-tuf/v2/metadata/config"
+	"github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata"
+)
+
+// Client update workflow implementation
+//
+// The "Updater" provides an implementation of the TUF client workflow (ref. https://theupdateframework.github.io/specification/latest/#detailed-client-workflow).
+// "Updater" provides an API to query available targets and to download them in a
+// secure manner: All downloaded files are verified by signed metadata.
+// High-level description of "Updater" functionality:
+// - Initializing an "Updater" loads and validates the trusted local root
+// metadata: This root metadata is used as the source of trust for all other
+// metadata.
+// - Refresh() can optionally be called to update and load all top-level
+// metadata as described in the specification, using both locally cached
+// metadata and metadata downloaded from the remote repository. If refresh is
+// not done explicitly, it will happen automatically during the first target
+// info lookup.
+// - Updater can be used to download targets. For each target:
+// - GetTargetInfo() is first used to find information about a
+// specific target. This will load new targets metadata as needed (from
+// local cache or remote repository).
+// - FindCachedTarget() can optionally be used to check if a
+// target file is already locally cached.
+// - DownloadTarget() downloads a target file and ensures it is
+// verified correct by the metadata.
+type Updater struct {
+	trusted *trustedmetadata.TrustedMetadata
+	cfg     *config.UpdaterConfig
+}
+
+type roleParentTuple struct {
+	Role   string
+	Parent string
+}
+
+// New creates a new Updater instance and loads trusted root metadata
+func New(config *config.UpdaterConfig) (*Updater, error) {
+	// make sure the trusted root metadata and remote URL were provided
+	if len(config.LocalTrustedRoot) == 0 || len(config.RemoteMetadataURL) == 0 {
+		return nil, fmt.Errorf("no initial trusted root metadata or remote URL provided")
+	}
+	// create a new trusted metadata instance using the trusted root.json
+	trustedMetadataSet, err := trustedmetadata.New(config.LocalTrustedRoot)
+	if err != nil {
+		return nil, err
+	}
+	// create an updater instance
+	updater := &Updater{
+		cfg:     config,
+		trusted: trustedMetadataSet, // save trusted metadata set
+	}
+	// ensure paths exist, doesn't do anything if caching is disabled
+	err = updater.cfg.EnsurePathsExist()
+	if err != nil {
+		return nil, err
+	}
+	// persist the initial root metadata to the local metadata folder
+	err = updater.persistMetadata(metadata.ROOT, updater.cfg.LocalTrustedRoot)
+	if err != nil {
+		return nil, err
+	}
+	// all okay, return the updater instance
+	return updater, nil
+}
+
+// Refresh loads and possibly refreshes top-level metadata.
+// Downloads, verifies, and loads metadata for the top-level roles in the
+// specified order (root -> timestamp -> snapshot -> targets) implementing
+// all the checks required in the TUF client workflow.
+// A Refresh() can be done only once during the lifetime of an Updater.
+// If Refresh() has not been explicitly called before the first
+// GetTargetInfo() call, it will be done implicitly at that time.
+// The metadata for delegated roles is not updated by Refresh():
+// that happens on demand during GetTargetInfo(). However, if the
+// repository uses consistent snapshots (ref. https://theupdateframework.github.io/specification/latest/#consistent-snapshots),
+// then all metadata downloaded by the Updater will use the same consistent repository state.
+//
+// If UnsafeLocalMode is set, no network interaction is performed, only
+// the cached files on disk are used. If the cached data is not complete,
+// this call will fail.
+func (update *Updater) Refresh() error {
+	if update.cfg.UnsafeLocalMode {
+		return update.unsafeLocalRefresh()
+	}
+	return update.onlineRefresh()
+}
+
+// onlineRefresh implements the TUF client workflow as described for
+// the Refresh function.
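+//
+// A minimal end-to-end use of the package looks like (sketch; assumes the
+// sibling config package's New helper and elides error handling):
+//
+//	cfg, _ := config.New("https://example.com/metadata", rootBytes)
+//	up, _ := updater.New(cfg)
+//	_ = up.Refresh()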
+func (update *Updater) onlineRefresh() error {
+	err := update.loadRoot()
+	if err != nil {
+		return err
+	}
+	err = update.loadTimestamp()
+	if err != nil {
+		return err
+	}
+	err = update.loadSnapshot()
+	if err != nil {
+		return err
+	}
+	_, err = update.loadTargets(metadata.TARGETS, metadata.ROOT)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// unsafeLocalRefresh tries to load the persisted metadata already cached
+// on disk. Note that this is an unsafe function, and does deviate from the
+// TUF specification sections 5.3 to 5.7 (update phases).
+// The metadata on disk are verified against the provided root though,
+// and expiration dates are verified.
+func (update *Updater) unsafeLocalRefresh() error {
+	// Root is already loaded
+	// load timestamp
+	var p = filepath.Join(update.cfg.LocalMetadataDir, metadata.TIMESTAMP)
+	data, err := update.loadLocalMetadata(p)
+	if err != nil {
+		return err
+	}
+	_, err = update.trusted.UpdateTimestamp(data)
+	if err != nil {
+		return err
+	}
+
+	// load snapshot
+	p = filepath.Join(update.cfg.LocalMetadataDir, metadata.SNAPSHOT)
+	data, err = update.loadLocalMetadata(p)
+	if err != nil {
+		return err
+	}
+	_, err = update.trusted.UpdateSnapshot(data, false)
+	if err != nil {
+		return err
+	}
+
+	// targets
+	p = filepath.Join(update.cfg.LocalMetadataDir, metadata.TARGETS)
+	data, err = update.loadLocalMetadata(p)
+	if err != nil {
+		return err
+	}
+	// verify and load the new target metadata
+	_, err = update.trusted.UpdateDelegatedTargets(data, metadata.TARGETS, metadata.ROOT)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// GetTargetInfo returns metadata.TargetFiles instance with information
+// for targetPath. The return value can be used as an argument to
+// DownloadTarget() and FindCachedTarget().
+// If Refresh() has not been called before calling
+// GetTargetInfo(), the refresh will be done implicitly.
+// As a side-effect this method downloads all the additional (delegated
+// targets) metadata it needs to return the target information.
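+//
+// A typical lookup-then-download flow (sketch, error handling elided):
+//
+//	ti, _ := up.GetTargetInfo("files/app.tar.gz")
+//	path, data, _ := up.FindCachedTarget(ti, "")
+//	if path == "" {
+//		path, data, _ = up.DownloadTarget(ti, "", "")
+//	}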
+func (update *Updater) GetTargetInfo(targetPath string) (*metadata.TargetFiles, error) {
+	// do a Refresh() in case there's no trusted targets.json yet
+	if update.trusted.Targets[metadata.TARGETS] == nil {
+		err := update.Refresh()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return update.preOrderDepthFirstWalk(targetPath)
+}
+
+// DownloadTarget downloads the target file specified by targetFile
+func (update *Updater) DownloadTarget(targetFile *metadata.TargetFiles, filePath, targetBaseURL string) (string, []byte, error) {
+	log := metadata.GetLogger()
+
+	var err error
+	if filePath == "" {
+		filePath, err = update.generateTargetFilePath(targetFile)
+		if err != nil {
+			return "", nil, err
+		}
+	}
+	if targetBaseURL == "" {
+		if update.cfg.RemoteTargetsURL == "" {
+			return "", nil, &metadata.ErrValue{Msg: "targetBaseURL must be set in either DownloadTarget() or the Updater struct"}
+		}
+		targetBaseURL = ensureTrailingSlash(update.cfg.RemoteTargetsURL)
+	} else {
+		targetBaseURL = ensureTrailingSlash(targetBaseURL)
+	}
+
+	targetFilePath := targetFile.Path
+	targetRemotePath := targetFilePath
+	consistentSnapshot := update.trusted.Root.Signed.ConsistentSnapshot
+	if consistentSnapshot && update.cfg.PrefixTargetsWithHash {
+		hashes := ""
+		// get first hex value of hashes
+		for _, v := range targetFile.Hashes {
+			hashes = hex.EncodeToString(v)
+			break
+		}
+		baseName := filepath.Base(targetFilePath)
+		dirName, ok := strings.CutSuffix(targetFilePath, "/"+baseName)
+		if !ok {
+			// e.g. "<hash>.<basename>"
+			targetRemotePath = fmt.Sprintf("%s.%s", hashes, baseName)
+		} else {
+			// e.g. "<dirname>/<hash>.<basename>"
+			targetRemotePath = fmt.Sprintf("%s/%s.%s", dirName, hashes, baseName)
+		}
+	}
+	fullURL := fmt.Sprintf("%s%s", targetBaseURL, targetRemotePath)
+	data, err := update.cfg.Fetcher.DownloadFile(fullURL, targetFile.Length, 0)
+	if err != nil {
+		return "", nil, err
+	}
+	err = targetFile.VerifyLengthHashes(data)
+	if err != nil {
+		return "", nil, err
+	}
+
+	// do not persist the target file if cache is disabled
+	if !update.cfg.DisableLocalCache {
+		err = os.WriteFile(filePath, data, 0644)
+		if err != nil {
+			return "", nil, err
+		}
+	}
+	log.Info("Downloaded target", "path", targetFile.Path)
+	return filePath, data, nil
+}
+
+// FindCachedTarget checks whether a local file is an up-to-date target
+func (update *Updater) FindCachedTarget(targetFile *metadata.TargetFiles, filePath string) (string, []byte, error) {
+	var err error
+	targetFilePath := ""
+	// do not look for cached target file if cache is disabled
+	if update.cfg.DisableLocalCache {
+		return "", nil, nil
+	}
+	// get its path if not provided
+	if filePath == "" {
+		targetFilePath, err = update.generateTargetFilePath(targetFile)
+		if err != nil {
+			return "", nil, err
+		}
+	} else {
+		targetFilePath = filePath
+	}
+	// get file content
+	data, err := os.ReadFile(targetFilePath)
+	if err != nil {
+		// do not want to return err, instead we say that there's no cached target available
+		return "", nil, nil
+	}
+	// verify if the length and hashes of this target file match the expected values
+	err = targetFile.VerifyLengthHashes(data)
+	if err != nil {
+		// do not want to return err, instead we say that there's no cached target available
+		return "", nil, nil
+	}
+	// if all okay, return its path
+	return targetFilePath, data, nil
+}
+
+// loadTimestamp loads local and remote timestamp metadata
+func (update *Updater) loadTimestamp() error {
+	log := metadata.GetLogger()
+	// try to read local timestamp
+	data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, metadata.TIMESTAMP))
+	if err != nil {
+		// this means there's no existing local timestamp so we should proceed downloading it without the need to UpdateTimestamp
+		log.Info("Local timestamp does not exist")
+	} else {
+		// local timestamp exists, let's try to verify it and load it to the trusted metadata set
+		_, err := update.trusted.UpdateTimestamp(data)
+		if err != nil {
+			if errors.Is(err, &metadata.ErrRepository{}) {
+				// local timestamp is not valid, proceed downloading from remote; note that this error type includes several other subset errors
+				log.Info("Local timestamp is not valid")
+			} else {
+				// another error
+				return err
+			}
+		} else {
+			// all okay, local timestamp exists and it is valid, nevertheless proceed with downloading from remote
+			log.Info("Local timestamp is valid")
+		}
+	}
+	// load from remote (whether local load succeeded or not)
+	data, err = update.downloadMetadata(metadata.TIMESTAMP, update.cfg.TimestampMaxLength, "")
+	if err != nil {
+		return err
+	}
+	// try to verify and load the newly downloaded timestamp
+	_, err = update.trusted.UpdateTimestamp(data)
+	if err != nil {
+		if errors.Is(err, &metadata.ErrEqualVersionNumber{}) {
+			// if the new timestamp version is the same as current, discard the
+			// new timestamp; this is normal and it shouldn't raise any error
+			return nil
+		} else {
+			// another error
+			return err
+		}
+	}
+	// proceed with persisting the new timestamp
+	err = update.persistMetadata(metadata.TIMESTAMP, data)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// loadSnapshot loads local (and if needed remote) snapshot metadata
+func (update *Updater) loadSnapshot() error {
+	log := metadata.GetLogger()
+	// try to read local snapshot
+	data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, metadata.SNAPSHOT))
+	if err != nil {
+		// this means there's no existing local snapshot so we should proceed downloading it without the need to UpdateSnapshot
+		log.Info("Local snapshot does not exist")
+	} else {
+		// successfully read a local snapshot metadata, so let's try to verify and load it to the trusted metadata set
+		_, err = update.trusted.UpdateSnapshot(data, true)
+		if err != nil {
+			// this means snapshot verification/loading failed
+			if errors.Is(err, &metadata.ErrRepository{}) {
+				// local snapshot is not valid, proceed downloading from remote; note that this error type includes several other subset errors
+				log.Info("Local snapshot is not valid")
+			} else {
+				// another error
+				return err
+			}
+		} else {
+			// this means snapshot verification/loading succeeded
+			log.Info("Local snapshot is valid: not downloading new one")
+			return nil
+		}
+	}
+	// local snapshot does not exist or is invalid, update from remote
+	log.Info("Failed to load local snapshot")
+	if update.trusted.Timestamp == nil {
+		return fmt.Errorf("trusted timestamp not set")
+	}
+	// extract the snapshot meta from the trusted timestamp metadata
+	snapshotMeta := update.trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+	// extract the length of the snapshot metadata to be downloaded
+	length := snapshotMeta.Length
+	if length == 0 {
+		length = update.cfg.SnapshotMaxLength
+	}
+	// extract which snapshot version should be downloaded in case of consistent snapshots
+	version := ""
+	if update.trusted.Root.Signed.ConsistentSnapshot {
+		version = strconv.FormatInt(snapshotMeta.Version, 10)
+	}
+	// download snapshot metadata
+	data, err = update.downloadMetadata(metadata.SNAPSHOT, length, version)
+	if err != nil {
+		return err
+	}
+	// verify and load the new snapshot
+	_, err = update.trusted.UpdateSnapshot(data, false)
+	if err != nil {
+		return err
+	}
+	// persist the new snapshot
+	err = update.persistMetadata(metadata.SNAPSHOT, data)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// loadTargets loads local (and if needed remote) metadata for roleName
+func (update *Updater) loadTargets(roleName, parentName string) (*metadata.Metadata[metadata.TargetsType], error) {
+	log := metadata.GetLogger()
+	// avoid loading "roleName" more than once during "GetTargetInfo"
+	role, ok := update.trusted.Targets[roleName]
+	if ok {
+		return role, nil
+	}
+	// try to read local targets
+	data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, roleName))
+	if err != nil {
+		// this means there's no existing local target file so we should proceed downloading it without the need to UpdateDelegatedTargets
+		log.Info("Local role does not exist", "role", roleName)
+	} else {
+		// successfully read a local targets metadata, so let's try to verify and load it to the trusted metadata set
+		delegatedTargets, err := update.trusted.UpdateDelegatedTargets(data, roleName, parentName)
+		if err != nil {
+			// this means targets verification/loading failed
+			if errors.Is(err, &metadata.ErrRepository{}) {
+				// local target file is not valid, proceed downloading from remote; note that this error type includes several other subset errors
+				log.Info("Local role is not valid", "role", roleName)
+			} else {
+				// another error
+				return nil, err
+			}
+		} else {
+			// this means targets verification/loading succeeded
+			log.Info("Local role is valid: not downloading new one", "role", roleName)
+			return delegatedTargets, nil
+		}
+	}
+	// local "roleName" does not exist or is invalid, update from remote
+	log.Info("Failed to load local role", "role", roleName)
+	if update.trusted.Snapshot == nil {
+		return nil, fmt.Errorf("trusted snapshot not set")
+	}
+	// extract the targets' meta from the trusted snapshot metadata
+	metaInfo, ok := update.trusted.Snapshot.Signed.Meta[fmt.Sprintf("%s.json", roleName)]
+	if !ok {
+		return nil, fmt.Errorf("role %s not found in snapshot", roleName)
+	}
+	// extract the length of the target metadata to be downloaded
+	length := metaInfo.Length
+	if length == 0 {
+		length = update.cfg.TargetsMaxLength
+	}
+	// extract which target metadata version should be downloaded in case of consistent snapshots
+	version := ""
+	if update.trusted.Root.Signed.ConsistentSnapshot {
+		version = strconv.FormatInt(metaInfo.Version, 10)
+	}
+	// download targets metadata
+	data, err = update.downloadMetadata(roleName, length, version)
+	if err != nil {
+		return nil, err
+	}
+	// verify and load the new target metadata
+	delegatedTargets, err := update.trusted.UpdateDelegatedTargets(data, roleName, parentName)
+	if err != nil {
+		return nil, err
+	}
+	// persist the new target metadata
+	err = update.persistMetadata(roleName, data)
+	if err != nil {
+		return nil, err
+	}
+	return delegatedTargets, nil
+}
+
+
+// loadRoot loads remote root metadata. It sequentially loads and
+// persists on local disk every newer root metadata version
+// available on the remote
+func (update *Updater) loadRoot() error {
+    // calculate boundaries
+    lowerBound := update.trusted.Root.Signed.Version + 1
+    upperBound := lowerBound + update.cfg.MaxRootRotations
+
+    // loop until we find the latest available version of root (download -> verify -> load -> persist)
+    for nextVersion := lowerBound; nextVersion < upperBound; nextVersion++ {
+        data, err := update.downloadMetadata(metadata.ROOT, update.cfg.RootMaxLength, strconv.FormatInt(nextVersion, 10))
+        if err != nil {
+            // downloading the root metadata failed for some reason
+            var tmpErr *metadata.ErrDownloadHTTP
+            if errors.As(err, &tmpErr) {
+                if tmpErr.StatusCode != http.StatusNotFound {
+                    // unexpected HTTP status code
+                    return err
+                }
+                // 404 means the current root is the newest available, so we can stop the loop and move forward
+                break
+            }
+            // some other error occurred
+            return err
+        } else {
+            // downloading root metadata succeeded, so let's try to verify and load it
+            _, err = update.trusted.UpdateRoot(data)
+            if err != nil {
+                return err
+            }
+            // persist root metadata to disk
+            err = update.persistMetadata(metadata.ROOT, data)
+            if err != nil {
+                return err
+            }
+        }
+    }
+    return nil
+}
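+
+// For illustration only (hypothetical URL and versions): with a trusted root
+// at version 3 and cfg.MaxRootRotations = 32, loadRoot requests
+//
+//	https://example.com/metadata/4.root.json
+//	https://example.com/metadata/5.root.json
+//	...
+//
+// verifying and persisting each version in turn, and stops at the first
+// HTTP 404, which means the previously loaded root is the newest available.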
+
+// preOrderDepthFirstWalk interrogates the tree of target delegations
+// in order of appearance (which implicitly orders them by trustworthiness),
+// and returns the matching target found in the most trusted role.
+func (update *Updater) preOrderDepthFirstWalk(targetFilePath string) (*metadata.TargetFiles, error) {
+    log := metadata.GetLogger()
+    // list of delegations to be interrogated. A (role, parent role) pair
+    // is needed to load and verify the delegated targets metadata
+    delegationsToVisit := []roleParentTuple{{
+        Role:   metadata.TARGETS,
+        Parent: metadata.ROOT,
+    }}
+    visitedRoleNames := map[string]bool{}
+    // pre-order depth-first traversal of the graph of target delegations
+    for len(visitedRoleNames) <= update.cfg.MaxDelegations && len(delegationsToVisit) > 0 {
+        // pop the role name from the top of the stack
+        delegation := delegationsToVisit[len(delegationsToVisit)-1]
+        delegationsToVisit = delegationsToVisit[:len(delegationsToVisit)-1]
+        // skip any visited current role to prevent cycles
+        _, ok := visitedRoleNames[delegation.Role]
+        if ok {
+            log.Info("Skipping visited current role", "role", delegation.Role)
+            continue
+        }
+        // the metadata for delegation.Role must be downloaded/updated before
+        // its targets, delegations, and child roles can be inspected
+        targets, err := update.loadTargets(delegation.Role, delegation.Parent)
+        if err != nil {
+            return nil, err
+        }
+        target, ok := targets.Signed.Targets[targetFilePath]
+        if ok {
+            log.Info("Found target in current role", "role", delegation.Role)
+            return target, nil
+        }
+        // after the pre-order check, add the current role to the set of visited roles
+        visitedRoleNames[delegation.Role] = true
+        if targets.Signed.Delegations != nil {
+            var childRolesToVisit []roleParentTuple
+            // note that this may be a slow operation if there are many
+            // delegated roles
+            roles := targets.Signed.Delegations.GetRolesForTarget(targetFilePath)
+            for _, rolesForTarget := range roles {
+                log.Info("Adding child role", "role", rolesForTarget.Name)
+                childRolesToVisit = append(childRolesToVisit, roleParentTuple{Role: rolesForTarget.Name, Parent: delegation.Role})
+                if rolesForTarget.Terminating {
+                    log.Info("Not backtracking to other roles")
+                    delegationsToVisit = []roleParentTuple{}
+                    break
+                }
+            }
+            // push childRolesToVisit in reverse order of appearance
+            // onto delegationsToVisit. Roles are popped from the end of
+            // the list
+            slices.Reverse(childRolesToVisit)
+            delegationsToVisit = append(delegationsToVisit, childRolesToVisit...)
+        }
+    }
+    if len(delegationsToVisit) > 0 {
+        log.Info("Too many roles left to visit for max allowed delegations",
+            "roles-left", len(delegationsToVisit),
+            "allowed-delegations", update.cfg.MaxDelegations)
+    }
+    // if this point is reached then the target was not found
+    return nil, fmt.Errorf("target %s not found", targetFilePath)
+}
+
+// persistMetadata writes metadata to disk atomically to avoid data loss
+func (update *Updater) persistMetadata(roleName string, data []byte) error {
+    log := metadata.GetLogger()
+    // do not persist the metadata if we have disabled local caching
+    if update.cfg.DisableLocalCache {
+        return nil
+    }
+    // caching is enabled, proceed with persisting the metadata locally
+    fileName := filepath.Join(update.cfg.LocalMetadataDir, fmt.Sprintf("%s.json", url.PathEscape(roleName)))
+    // create a temporary file
+    file, err := os.CreateTemp(update.cfg.LocalMetadataDir, "tuf_tmp")
+    if err != nil {
+        return err
+    }
+    // change the file permissions to our desired permissions
+    err = file.Chmod(0644)
+    if err != nil {
+        // close and delete the temporary file if there was an error while changing its permissions
+        file.Close()
+        errRemove := os.Remove(file.Name())
+        if errRemove != nil {
+            log.Info("Failed to delete temporary file", "name", file.Name())
+        }
+        return err
+    }
+    // write the data content to the temporary file
+    _, err = file.Write(data)
+    if err != nil {
+        // close and delete the temporary file if there was an error while writing
+        file.Close()
+        errRemove := os.Remove(file.Name())
+        if errRemove != nil {
+            log.Info("Failed to delete temporary file", "name", file.Name())
+        }
+        return err
+    }
+
+    // can't move/rename an open file on Windows, so close it first
+    err = file.Close()
+    if err != nil {
+        return err
+    }
+    // if all okay, rename the temporary file to the desired one
+    err = os.Rename(file.Name(), fileName)
+    if err != nil {
+        return err
+    }
+    // read the file back to verify that the metadata was persisted intact
+    read, err := os.ReadFile(fileName)
+    if err != nil {
+        return err
+    }
+    if string(read) != string(data) {
+        return fmt.Errorf("failed to persist metadata")
+    }
+    return nil
+}
+
+// downloadMetadata downloads a metadata file and returns it as bytes
+func (update *Updater) downloadMetadata(roleName string, length int64, version string) ([]byte, error) {
+    urlPath := ensureTrailingSlash(update.cfg.RemoteMetadataURL)
+    // build urlPath
+    if version == "" {
+        urlPath = fmt.Sprintf("%s%s.json", urlPath, url.PathEscape(roleName))
+    } else {
+        urlPath = fmt.Sprintf("%s%s.%s.json", urlPath, version, url.PathEscape(roleName))
+    }
+    return update.cfg.Fetcher.DownloadFile(urlPath, length, 0)
+}
+
+// generateTargetFilePath generates a local file path from TargetFiles
+func (update *Updater) generateTargetFilePath(tf *metadata.TargetFiles) (string, error) {
+    // LocalTargetsDir can be omitted if caching is disabled
+    if update.cfg.LocalTargetsDir == "" && !update.cfg.DisableLocalCache {
+        return "", &metadata.ErrValue{Msg: "LocalTargetsDir must be set if filepath is not given"}
+    }
+    // use the URL-encoded target path as the filename
+    return filepath.Join(update.cfg.LocalTargetsDir, url.PathEscape(tf.Path)), nil
+}
+
+// loadLocalMetadata reads a local .json file and returns its bytes
+func (update *Updater) loadLocalMetadata(roleName string) ([]byte, error) {
+    return os.ReadFile(fmt.Sprintf("%s.json", roleName))
+} + +// GetTopLevelTargets returns the top-level target files +func (update *Updater) GetTopLevelTargets() map[string]*metadata.TargetFiles { + return update.trusted.Targets[metadata.TARGETS].Signed.Targets +} + +// GetTrustedMetadataSet returns the trusted metadata set +func (update *Updater) GetTrustedMetadataSet() trustedmetadata.TrustedMetadata { + return *update.trusted +} + +// UnsafeSetRefTime sets the reference time that the updater uses. +// This should only be done in tests. +// Using this function is useful when testing time-related behavior in go-tuf. +func (update *Updater) UnsafeSetRefTime(t time.Time) { + update.trusted.RefTime = t +} + +func IsWindowsPath(path string) bool { + match, _ := regexp.MatchString(`^[a-zA-Z]:\\`, path) + return match +} + +// ensureTrailingSlash ensures url ends with a slash +func ensureTrailingSlash(url string) string { + if IsWindowsPath(url) { + slash := string(filepath.Separator) + if strings.HasSuffix(url, slash) { + return url + } + return url + slash + } + if strings.HasSuffix(url, "/") { + return url + } + return url + "/" +} diff --git a/vendor/github.com/transparency-dev/formats/LICENSE b/vendor/github.com/transparency-dev/formats/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/transparency-dev/formats/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!) The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/transparency-dev/formats/log/README.md b/vendor/github.com/transparency-dev/formats/log/README.md
new file mode 100644
index 00000000000..887ce47b2a1
--- /dev/null
+++ b/vendor/github.com/transparency-dev/formats/log/README.md
@@ -0,0 +1,127 @@
+# Checkpoint format
+
+This directory contains a description and supporting golang code for
+a reusable Checkpoint format which the TrustFabric team uses in various
+projects.
+
+The format itself is heavily based on the
+[golang sumdb head](https://sum.golang.org/latest), and corresponding
+[signed note](https://pkg.go.dev/golang.org/x/mod/sumdb/note) formats,
+and consists of two parts: a signed envelope, and a body.
+
+### Signed envelope
+
+The envelope (signed note) is of the form:
+
+* One or more non-empty lines, each terminated by `\n` (the `body`)
+* One line consisting of just one `\n` (i.e. a blank line)
+* One or more `signature` lines, each terminated by `\n`
+
+All signatures commit to the body only (including its trailing newline, but not
+the blank line's newline - see below for an example).
+
+The signature(s) themselves are in the sumdb note format (concrete example
+[below](#example)):
+
+`— <ID> <signature_bytes>`
+
+where:
+
+* `—` is an em dash (U+2014)
+* `<ID>` gives a human-readable representation of the signing ID
+
+and the `signature_bytes` are prefixed with the first 4 bytes of the SHA256 hash
+of the associated public key to act as a hint in identifying the correct key to
+verify with.
+
+For guidance on generating keys, see the
+[note documentation](https://pkg.go.dev/golang.org/x/mod/sumdb/note#hdr-Generating_Keys)
+and [implementation](https://cs.opensource.google/go/x/mod/+/master:sumdb/note/note.go;l=368;drc=ed3ec21bb8e252814c380df79a80f366440ddb2d).
+Of particular note is that the public key and its hash commit to the algorithm
+identifier.
+
+**Differences from sumdb note:**
+Whereas the golang signed note *implementation* currently supports only Ed25519
+signatures, the format itself is not restricted to this scheme.
+
+### Checkpoint body
+
+The checkpoint body is of the form:
+
+```text
+<origin>
+<size>
+<root hash>
+[otherdata]
+```
+
+The first 3 lines of the body **MUST** be present in all Checkpoints.
+
+* `<origin>` should be a unique identifier for the log identity which issued the checkpoint.
+  The format SHOULD be a URI-like structure like `<prefix>[/<suffix>]`, where the log operator
+  controls `<prefix>`, e.g. `example.com/log42`. This is only a recommendation, and clients MUST
+  NOT assume that the origin is a URI following this format. This structure reduces the likelihood
+  of origin collision, and gives clues to humans about the log operator and what is in the log. The
+  suffix is optional and can be anything. It is used to disambiguate logs owned under the same
+  prefix.
+
+  The presence of this identifier forms part of the log claim, and guards against two
+  logs producing bytewise identical checkpoints.
+
+* `<size>` is the ASCII decimal representation of the number of leaves committed
+  to by this checkpoint. It should not have leading zeroes.
+
+* `<root hash>` is an
+  [RFC4648 standard encoding](https://datatracker.ietf.org/doc/html/rfc4648#section-4) base-64
+  representation of the log root hash at the specified log size.
+
+* `[otherdata]` is opaque and optional, and, if necessary, can be used to tie extra
+  data to the checkpoint; however, its format must conform to the sumdb signed
+  note spec (e.g. it must not contain blank lines.)
+
+> Note that the golang sumdb implementation is already compatible with this
+`[otherdata]` extension.
+If you plan to use `otherdata` in your log, see the section on [merging checkpoints](#merging-checkpoints).
+
+The first signature on a checkpoint should be from the log which issued it, but there MUST NOT
+be more than one signature from a log identity present on the checkpoint.
+
+## Example
+
+An annotated example of a signed checkpoint in this format is shown below:
+
+![format](images/format.png)
+
+This checkpoint was issued by the log known as "Moon Log", whose size is
+4027504. In the `other data` section a timestamp is encoded as a 64-bit hex
+value, followed by further application-specific data relating to the phase of
+the moon at the point the checkpoint was issued.
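+
+As a minimal sketch (the origin, size, and root hash here are made-up values),
+the three-line body of such a checkpoint can be produced with the
+`log.Checkpoint` type defined below in this directory:
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"fmt"
+
+	"github.com/transparency-dev/formats/log"
+)
+
+func main() {
+	// Made-up root hash, for illustration only.
+	hash, err := base64.StdEncoding.DecodeString("qINS1GKFTGnQc5uDfh1U6sWnjbBzWZygd8mxgbuFYRo=")
+	if err != nil {
+		panic(err)
+	}
+	cp := log.Checkpoint{Origin: "example.com/log42", Size: 4027504, Hash: hash}
+	// Prints the origin, size, and base64 root hash lines, each newline-terminated.
+	fmt.Print(string(cp.Marshal()))
+}
+```
+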
+ +## Merging Checkpoints + +This checkpoint format allows a checkpoint that has been independently signed by +multiple identities to be merged, creating a single checkpoint with multiple +signatures. This is particularly useful for witnessing, where witnesses will +independently check consistency of the log and produce a counter-signed copy +containing two signatures: one for the log, and one for the witness. + +The ability to merge signatures for the same body is a useful optimization. +Clients that require N witness signatures will not be required to fetch N checkpoints. +Instead they can fetch a single checkpoint and confirm it has the N required +signatures (in addition to the log signature). + +Note that this optimization requires the checkpoint _body_ to be byte-equivalent. +The log signature does not need to be equal; when merging, only one of the log's +signatures over this body will be propagated. The core checkpoint format above +allows merging for any two consistent checkpoints for the same tree size. +However, if the `otherdata` extension is used then this can lead to checkpoints +that cannot be merged, even at the same tree size. + +We recommend that log operators using `otherdata` consider carefully what +information is included in this. If data is included in `otherdata` that is not +fixed for a given tree size, then this can easily lead to unmergeable checkpoints. +The most commonly anticipated cause for this would be including the timestamp at +which the checkpoint was requested within the `otherdata`. In this case, no two +witnesses are likely to ever acquire the same checkpoint body. There may be cases +where this is unavoidable, but this consequence should be considered in the design. diff --git a/vendor/github.com/transparency-dev/formats/log/checkpoint.go b/vendor/github.com/transparency-dev/formats/log/checkpoint.go new file mode 100644 index 00000000000..3bbaacb3893 --- /dev/null +++ b/vendor/github.com/transparency-dev/formats/log/checkpoint.go @@ -0,0 +1,79 @@ +// Copyright 2021 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package log provides basic support for the common log checkpoint and proof +// format described by the README in this directory. +package log + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "strconv" +) + +// Checkpoint represents a minimal log checkpoint (STH). +type Checkpoint struct { + // Origin is the string identifying the log which issued this checkpoint. + Origin string + // Size is the number of entries in the log at this checkpoint. + Size uint64 + // Hash is the hash which commits to the contents of the entire log. + Hash []byte +} + +// Marshal returns the common format representation of this Checkpoint. +func (c Checkpoint) Marshal() []byte { + return []byte(fmt.Sprintf("%s\n%d\n%s\n", c.Origin, c.Size, base64.StdEncoding.EncodeToString(c.Hash))) +} + +// Unmarshal parses the common formatted checkpoint data and stores the result +// in the Checkpoint. 
+//
+// The supplied data is expected to begin with the following 3 lines of text,
+// each followed by a newline:
+//   - <origin>
+//   - <decimal representation of the log size>
+//   - <base64 representation of the root hash>
+//
+// Any trailing data after this will be returned.
+func (c *Checkpoint) Unmarshal(data []byte) ([]byte, error) {
+    l := bytes.SplitN(data, []byte("\n"), 4)
+    if len(l) < 4 {
+        return nil, errors.New("invalid checkpoint - too few newlines")
+    }
+    origin := string(l[0])
+    if len(origin) == 0 {
+        return nil, errors.New("invalid checkpoint - empty origin")
+    }
+    size, err := strconv.ParseUint(string(l[1]), 10, 64)
+    if err != nil {
+        return nil, fmt.Errorf("invalid checkpoint - size invalid: %w", err)
+    }
+    h, err := base64.StdEncoding.DecodeString(string(l[2]))
+    if err != nil {
+        return nil, fmt.Errorf("invalid checkpoint - invalid hash: %w", err)
+    }
+    var rest []byte
+    if len(l[3]) > 0 {
+        rest = l[3]
+    }
+    *c = Checkpoint{
+        Origin: origin,
+        Size:   size,
+        Hash:   h,
+    }
+    return rest, nil
+}
diff --git a/vendor/github.com/transparency-dev/formats/log/identifier.go b/vendor/github.com/transparency-dev/formats/log/identifier.go
new file mode 100644
index 00000000000..424794541f7
--- /dev/null
+++ b/vendor/github.com/transparency-dev/formats/log/identifier.go
@@ -0,0 +1,30 @@
+// Copyright 2021 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+    "crypto/sha256"
+    "fmt"
+)
+
+// ID returns the identifier to use for a log given the Origin. This is the ID
+// used to find checkpoints for this log at distributors, and that will be used
+// to feed checkpoints to witnesses.
+func ID(origin string) string {
+    s := sha256.New()
+    s.Write([]byte("o:"))
+    s.Write([]byte(origin))
+    return fmt.Sprintf("%x", s.Sum(nil))
+}
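+
+// For example (hypothetical origin), the returned identifier is simply the
+// lowercase hex encoding of SHA-256 over the origin string with the "o:"
+// prefix:
+//
+//	id := ID("example.com/log42") // == hex(sha256("o:" + "example.com/log42"))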
diff --git a/vendor/github.com/transparency-dev/formats/log/note.go b/vendor/github.com/transparency-dev/formats/log/note.go
new file mode 100644
index 00000000000..45ea536841f
--- /dev/null
+++ b/vendor/github.com/transparency-dev/formats/log/note.go
@@ -0,0 +1,56 @@
+// Copyright 2021 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+    "fmt"
+
+    "golang.org/x/mod/sumdb/note"
+)
+
+// ParseCheckpoint takes a raw checkpoint as bytes and returns a parsed checkpoint
+// and any otherData in the body, provided that:
+// * a valid log signature is found; and
+// * the checkpoint unmarshals correctly; and
+// * the log origin is that expected.
+// In all other cases, an empty checkpoint is returned. The underlying note is
+// always returned where possible.
+// The signatures on the note will include the log signature if no error is returned,
+// plus any signatures from otherVerifiers that were found.
+func ParseCheckpoint(chkpt []byte, origin string, logVerifier note.Verifier, otherVerifiers ...note.Verifier) (*Checkpoint, []byte, *note.Note, error) {
+    vs := append(append(make([]note.Verifier, 0, len(otherVerifiers)+1), logVerifier), otherVerifiers...)
+    verifiers := note.VerifierList(vs...)
+
+    n, err := note.Open(chkpt, verifiers)
+    if err != nil {
+        return nil, nil, nil, fmt.Errorf("failed to verify signatures on checkpoint: %v", err)
+    }
+
+    for _, s := range n.Sigs {
+        if s.Hash == logVerifier.KeyHash() && s.Name == logVerifier.Name() {
+            // The log has signed this checkpoint. It is now safe to parse.
+            cp := &Checkpoint{}
+            var otherData []byte
+            if otherData, err = cp.Unmarshal([]byte(n.Text)); err != nil {
+                return nil, nil, n, fmt.Errorf("failed to unmarshal checkpoint: %v", err)
+            }
+            if cp.Origin != origin {
+                return nil, nil, n, fmt.Errorf("got Origin %q but expected %q", cp.Origin, origin)
+            }
+            return cp, otherData, n, nil
+        }
+    }
+    return nil, nil, n, fmt.Errorf("no log signature found on note")
+}
diff --git a/vendor/github.com/transparency-dev/merkle/.golangci.yaml b/vendor/github.com/transparency-dev/merkle/.golangci.yaml
new file mode 100644
index 00000000000..0675e1ef409
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/.golangci.yaml
@@ -0,0 +1,11 @@
+run:
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  deadline: 90s
+
+linters-settings:
+  depguard:
+    list-type: blacklist
+    packages:
+      - golang.org/x/net/context
+      - github.com/gogo/protobuf/proto
+
diff --git a/vendor/github.com/transparency-dev/merkle/CHANGELOG.md b/vendor/github.com/transparency-dev/merkle/CHANGELOG.md
new file mode 100644
index 00000000000..b05ee0adcbd
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/CHANGELOG.md
@@ -0,0 +1,10 @@
+# MERKLE changelog
+
+## HEAD
+
+## v0.0.2
+ * Fuzzing support
+ * Dependency updates, notably to go1.19
+
+## v0.0.1
+Initial release
diff --git a/vendor/github.com/transparency-dev/merkle/CODEOWNERS b/vendor/github.com/transparency-dev/merkle/CODEOWNERS
new file mode 100644
index 00000000000..1b413465841
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/CODEOWNERS
@@ -0,0 +1 @@
+* @transparency-dev/core-team
diff --git a/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md b/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md
new file mode 100644
index 00000000000..43de4c9d470
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md
@@ -0,0 +1,58 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement. This is not a copyright **assignment**, it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+ * If you are an individual writing original source code and you're sure you
+   own the intellectual property, then you'll need to sign an [individual
+   CLA][].
+
+ * If you work for a company that wants to allow you to contribute your work,
+   then you'll need to sign a [corporate CLA][].
+ +You generally only need to submit a CLA once, so if you've already submitted +one (even if it was for a different project), you probably don't need to do it +again. + +[individual CLA]: https://developers.google.com/open-source/cla/individual +[corporate CLA]: https://developers.google.com/open-source/cla/corporate + +Once your CLA is submitted (or if you already submitted one for +another Google project), make a commit adding yourself to the +[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part +of your first [pull request][]. + +[AUTHORS]: AUTHORS +[CONTRIBUTORS]: CONTRIBUTORS + + +## Submitting a patch ## + + 1. It's generally best to start by opening a new issue describing the bug or + feature you're intending to fix. Even if you think it's relatively minor, + it's helpful to know what people are working on. Mention in the initial + issue that you are planning to work on that bug or feature so that it can + be assigned to you. + + 1. Follow the normal process of [forking][] the project, and setup a new + branch to work in. It's important that each group of changes be done in + separate branches in order to ensure that a pull request only includes the + commits related to that bug or feature. + + 1. Do your best to have [well-formed commit messages][] for each change. + This provides consistency throughout the project, and ensures that commit + messages are able to be formatted properly by various git tools. + + 1. Finally, push the commits to your fork and submit a [pull request][]. + +[forking]: https://help.github.com/articles/fork-a-repo +[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html +[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/transparency-dev/merkle/LICENSE b/vendor/github.com/transparency-dev/merkle/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/transparency-dev/merkle/README.md b/vendor/github.com/transparency-dev/merkle/README.md new file mode 100644 index 00000000000..3c8d2127110 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/README.md @@ -0,0 +1,25 @@ +# Merkle + +[![Go Reference](https://pkg.go.dev/badge/github.com/transparency-dev/merkle.svg)](https://pkg.go.dev/github.com/transparency-dev/merkle) +[![Go Report +Card](https://goreportcard.com/badge/github.com/transparency-dev/merkle)](https://goreportcard.com/report/github.com/transparency-dev/merkle) +[![codecov](https://codecov.io/gh/transparency-dev/merkle/branch/main/graph/badge.svg?token=BBCRAMOBY2)](https://codecov.io/gh/transparency-dev/merkle) +[![Slack +Status](https://img.shields.io/badge/Slack-Chat-blue.svg)](https://gtrillian.slack.com/) + +## Overview + +This repository contains Go code to help create and manipulate Merkle trees, as +well as constructing and verifying various types of proof. 
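+
+As a quick, hypothetical sketch (the leaf data and the RFC 6962-style
+domain-separated hashing are illustrative choices, not mandated by the
+package), appending leaf hashes with the `compact` package and computing a
+root looks like:
+
+```go
+package main
+
+import (
+	"crypto/sha256"
+	"fmt"
+
+	"github.com/transparency-dev/merkle/compact"
+)
+
+func main() {
+	// Internal-node hash, using the RFC 6962 0x01 domain-separation prefix.
+	rf := compact.RangeFactory{Hash: func(left, right []byte) []byte {
+		h := sha256.New()
+		h.Write([]byte{1})
+		h.Write(left)
+		h.Write(right)
+		return h.Sum(nil)
+	}}
+	cr := rf.NewEmptyRange(0)
+	for _, leaf := range [][]byte{[]byte("a"), []byte("b"), []byte("c")} {
+		// Leaf hash, using the RFC 6962 0x00 prefix.
+		lh := sha256.Sum256(append([]byte{0}, leaf...))
+		if err := cr.Append(lh[:], nil); err != nil {
+			panic(err)
+		}
+	}
+	root, err := cr.GetRootHash(nil)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("root: %x\n", root)
+}
+```
+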
+ +This is the data structure which is used by projects such as +[Trillian](https://github.com/google/trillian) to provide +[verifiable logs](https://transparency.dev/verifiable-data-structures/#verifiable-log). + + +## Support +* Mailing list: https://groups.google.com/forum/#!forum/trillian-transparency +* Slack: https://gtrillian.slack.com/ (invitation) + + + diff --git a/vendor/github.com/transparency-dev/merkle/cloudbuild.yaml b/vendor/github.com/transparency-dev/merkle/cloudbuild.yaml new file mode 100644 index 00000000000..eafbc790a08 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/cloudbuild.yaml @@ -0,0 +1,26 @@ +timeout: 300s +options: + machineType: E2_HIGHCPU_32 + volumes: + - name: go-modules + path: /go + env: + - GOPROXY=https://proxy.golang.org + - PROJECT_ROOT=github.com/transparency-dev/merkle + - GOPATH=/go + +# Cloud Build logs sent to GCS bucket +logsBucket: 'gs://trillian-cloudbuild-logs' + +steps: +- id: 'lint' + name: "golangci/golangci-lint:v1.51" + args: ["golangci-lint", "run", "--timeout", "10m"] + +- id: 'unit tests' + name: 'golang:1.19' + args: ['go', 'test', './...'] + +- id: 'build' + name: 'golang:1.19' + args: ['go', 'build', './...'] diff --git a/vendor/github.com/transparency-dev/merkle/compact/nodes.go b/vendor/github.com/transparency-dev/merkle/compact/nodes.go new file mode 100644 index 00000000000..41c0854e483 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/compact/nodes.go @@ -0,0 +1,89 @@ +// Copyright 2019 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compact + +import "math/bits" + +// NodeID identifies a node of a Merkle tree. +// +// The ID consists of a level and index within this level. Levels are numbered +// from 0, which corresponds to the tree leaves. Within each level, nodes are +// numbered with consecutive indices starting from 0. +// +// L4: ┌───────0───────┐ ... +// L3: ┌───0───┐ ┌───1───┐ ┌─── ... +// L2: ┌─0─┐ ┌─1─┐ ┌─2─┐ ┌─3─┐ ┌─4─┐ ... +// L1: ┌0┐ ┌1┐ ┌2┐ ┌3┐ ┌4┐ ┌5┐ ┌6┐ ┌7┐ ┌8┐ ┌9┐ ... +// L0: 0 1 2 3 4 5 6 7 8 9 ... ... ... ... ... ... +// +// When the tree is not perfect, the nodes that would complement it to perfect +// are called ephemeral. Algorithms that operate with ephemeral nodes still map +// them to the same address space. +type NodeID struct { + Level uint + Index uint64 +} + +// NewNodeID returns a NodeID with the passed in node coordinates. +func NewNodeID(level uint, index uint64) NodeID { + return NodeID{Level: level, Index: index} +} + +// Parent returns the ID of the parent node. +func (id NodeID) Parent() NodeID { + return NewNodeID(id.Level+1, id.Index>>1) +} + +// Sibling returns the ID of the sibling node. +func (id NodeID) Sibling() NodeID { + return NewNodeID(id.Level, id.Index^1) +} + +// Coverage returns the [begin, end) range of leaves covered by the node. 
+func (id NodeID) Coverage() (uint64, uint64) { + return id.Index << id.Level, (id.Index + 1) << id.Level +} + +// RangeNodes appends the IDs of the nodes that comprise the [begin, end) +// compact range to the given slice, and returns the new slice. The caller may +// pre-allocate space with the help of the RangeSize function. +func RangeNodes(begin, end uint64, ids []NodeID) []NodeID { + left, right := Decompose(begin, end) + + pos := begin + // Iterate over perfect subtrees along the left border of the range, ordered + // from lower to upper levels. + for bit := uint64(0); left != 0; pos, left = pos+bit, left^bit { + level := uint(bits.TrailingZeros64(left)) + bit = uint64(1) << level + ids = append(ids, NewNodeID(level, pos>>level)) + } + + // Iterate over perfect subtrees along the right border of the range, ordered + // from upper to lower levels. + for bit := uint64(0); right != 0; pos, right = pos+bit, right^bit { + level := uint(bits.Len64(right)) - 1 + bit = uint64(1) << level + ids = append(ids, NewNodeID(level, pos>>level)) + } + + return ids +} + +// RangeSize returns the number of nodes in the [begin, end) compact range. +func RangeSize(begin, end uint64) int { + left, right := Decompose(begin, end) + return bits.OnesCount64(left) + bits.OnesCount64(right) +} diff --git a/vendor/github.com/transparency-dev/merkle/compact/range.go b/vendor/github.com/transparency-dev/merkle/compact/range.go new file mode 100644 index 00000000000..0952646c4f6 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/compact/range.go @@ -0,0 +1,264 @@ +// Copyright 2019 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compact provides compact Merkle tree data structures. +package compact + +import ( + "bytes" + "errors" + "fmt" + "math/bits" +) + +// HashFn computes an internal node's hash using the hashes of its child nodes. +type HashFn func(left, right []byte) []byte + +// VisitFn visits the node with the specified ID and hash. +type VisitFn func(id NodeID, hash []byte) + +// RangeFactory allows creating compact ranges with the specified hash +// function, which must not be nil, and must not be changed. +type RangeFactory struct { + Hash HashFn +} + +// NewRange creates a Range for [begin, end) with the given set of hashes. The +// hashes correspond to the roots of the minimal set of perfect sub-trees +// covering the [begin, end) leaves range, ordered left to right. +func (f *RangeFactory) NewRange(begin, end uint64, hashes [][]byte) (*Range, error) { + if end < begin { + return nil, fmt.Errorf("invalid range: end=%d, want >= %d", end, begin) + } + if got, want := len(hashes), RangeSize(begin, end); got != want { + return nil, fmt.Errorf("invalid hashes: got %d values, want %d", got, want) + } + return &Range{f: f, begin: begin, end: end, hashes: hashes}, nil +} + +// NewEmptyRange returns a new Range for an empty [begin, begin) range. 
The +// value of begin defines where the range will start growing from when entries +// are appended to it. +func (f *RangeFactory) NewEmptyRange(begin uint64) *Range { + return &Range{f: f, begin: begin, end: begin} +} + +// Range represents a compact Merkle tree range for leaf indices [begin, end). +// +// It contains the minimal set of perfect subtrees whose leaves comprise this +// range. The structure is efficiently mergeable with other compact ranges that +// share one of the endpoints with it. +// +// For more details, see +// https://github.com/transparency-dev/merkle/blob/main/docs/compact_ranges.md. +type Range struct { + f *RangeFactory + begin uint64 + end uint64 + hashes [][]byte +} + +// Begin returns the first index covered by the range (inclusive). +func (r *Range) Begin() uint64 { + return r.begin +} + +// End returns the last index covered by the range (exclusive). +func (r *Range) End() uint64 { + return r.end +} + +// Hashes returns sub-tree hashes corresponding to the minimal set of perfect +// sub-trees covering the [begin, end) range, ordered left to right. +func (r *Range) Hashes() [][]byte { + return r.hashes +} + +// Append extends the compact range by appending the passed in hash to it. It +// reports all the added nodes through the visitor function (if non-nil). +func (r *Range) Append(hash []byte, visitor VisitFn) error { + if visitor != nil { + visitor(NewNodeID(0, r.end), hash) + } + return r.appendImpl(r.end+1, hash, nil, visitor) +} + +// AppendRange extends the compact range by merging in the other compact range +// from the right. It uses the tree hasher to calculate hashes of newly created +// nodes, and reports them through the visitor function (if non-nil). +func (r *Range) AppendRange(other *Range, visitor VisitFn) error { + if other.f != r.f { + return errors.New("incompatible ranges") + } + if got, want := other.begin, r.end; got != want { + return fmt.Errorf("ranges are disjoint: other.begin=%d, want %d", got, want) + } + if len(other.hashes) == 0 { // The other range is empty, merging is trivial. + return nil + } + return r.appendImpl(other.end, other.hashes[0], other.hashes[1:], visitor) +} + +// GetRootHash returns the root hash of the Merkle tree represented by this +// compact range. Requires the range to start at index 0. If the range is +// empty, returns nil. +// +// If visitor is not nil, it is called with all "ephemeral" nodes (i.e. the +// ones rooting imperfect subtrees) along the right border of the tree. +func (r *Range) GetRootHash(visitor VisitFn) ([]byte, error) { + if r.begin != 0 { + return nil, fmt.Errorf("begin=%d, want 0", r.begin) + } + ln := len(r.hashes) + if ln == 0 { + return nil, nil + } + hash := r.hashes[ln-1] + // All non-perfect subtree hashes along the right border of the tree + // correspond to the parents of all perfect subtree nodes except the lowest + // one (therefore the loop skips it). + for i, size := ln-2, r.end; i >= 0; i-- { + hash = r.f.Hash(r.hashes[i], hash) + if visitor != nil { + size &= size - 1 // Delete the previous node. + level := uint(bits.TrailingZeros64(size)) + 1 // Compute the parent level. + index := size >> level // And its horizontal index. + visitor(NewNodeID(level, index), hash) + } + } + return hash, nil +} + +// Equal compares two Ranges for equality. 
+func (r *Range) Equal(other *Range) bool { + if r.f != other.f || r.begin != other.begin || r.end != other.end { + return false + } + if len(r.hashes) != len(other.hashes) { + return false + } + for i := range r.hashes { + if !bytes.Equal(r.hashes[i], other.hashes[i]) { + return false + } + } + return true +} + +// appendImpl extends the compact range by merging the [r.end, end) compact +// range into it. The other compact range is decomposed into a seed hash and +// all the other hashes (possibly none). The method uses the tree hasher to +// calculate hashes of newly created nodes, and reports them through the +// visitor function (if non-nil). +func (r *Range) appendImpl(end uint64, seed []byte, hashes [][]byte, visitor VisitFn) error { + // Bits [low, high) of r.end encode the merge path, i.e. the sequence of node + // merges that transforms the two compact ranges into one. + low, high := getMergePath(r.begin, r.end, end) + if high < low { + high = low + } + index := r.end >> low + // Now bits [0, high-low) of index encode the merge path. + + // The number of one bits in index is the number of nodes from the left range + // that will be merged, and zero bits correspond to the nodes in the right + // range. Below we make sure that both ranges have enough hashes, which can + // be false only in case the data is corrupted in some way. + ones := bits.OnesCount64(index & (1<<(high-low) - 1)) + if ln := len(r.hashes); ln < ones { + return fmt.Errorf("corrupted lhs range: got %d hashes, want >= %d", ln, ones) + } + if ln, zeros := len(hashes), int(high-low)-ones; ln < zeros { + return fmt.Errorf("corrupted rhs range: got %d hashes, want >= %d", ln+1, zeros+1) + } + + // Some of the trailing nodes of the left compact range, and some of the + // leading nodes of the right range, are sequentially merged with the seed, + // according to the mask. All new nodes are reported through the visitor. + idx1, idx2 := len(r.hashes), 0 + for h := low; h < high; h++ { + if index&1 == 0 { + seed = r.f.Hash(seed, hashes[idx2]) + idx2++ + } else { + idx1-- + seed = r.f.Hash(r.hashes[idx1], seed) + } + index >>= 1 + if visitor != nil { + visitor(NewNodeID(h+1, index), seed) + } + } + + // All nodes from both ranges that have not been merged are bundled together + // with the "merged" seed node. + r.hashes = append(append(r.hashes[:idx1], seed), hashes[idx2:]...) + r.end = end + return nil +} + +// getMergePath returns the merging path between the compact range [begin, mid) +// and [mid, end). The path is represented as a range of bits within mid, with +// bit indices [low, high). A bit value of 1 on level i of mid means that the +// node on this level merges with the corresponding node in the left compact +// range, whereas 0 represents merging with the right compact range. If the +// path is empty then high <= low. +// +// The output is not specified if begin <= mid <= end doesn't hold, but the +// function never panics. +func getMergePath(begin, mid, end uint64) (uint, uint) { + low := bits.TrailingZeros64(mid) + high := 64 + if begin != 0 { + high = bits.Len64(mid ^ (begin - 1)) + } + if high2 := bits.Len64((mid - 1) ^ end); high2 < high { + high = high2 + } + return uint(low), uint(high - 1) +} + +// Decompose splits the [begin, end) range into a minimal number of sub-ranges, +// each of which is of the form [m * 2^k, (m+1) * 2^k), i.e. of length 2^k, for +// some integers m, k >= 0. 
+//
+// The sequence of sizes is returned encoded as bitmasks left and right, where:
+// - a 1 bit in a bitmask denotes a sub-range of the corresponding size 2^k
+// - left mask bits in LSB-to-MSB order encode the left part of the sequence
+// - right mask bits in MSB-to-LSB order encode the right part
+//
+// The corresponding values of m are not returned (they can be calculated from
+// begin and the sub-range sizes).
+//
+// For example, (begin, end) values of (0b110, 0b11101) would indicate a
+// sequence of tree sizes: 2,8; 8,4,1.
+//
+// The output is not specified if begin > end, but the function never panics.
+func Decompose(begin, end uint64) (uint64, uint64) {
+	// Special case, as the code below works only if begin != 0, or end < 2^63.
+	if begin == 0 {
+		return 0, end
+	}
+	xbegin := begin - 1
+	// Find where paths to leaves #begin-1 and #end diverge, and mask the upper
+	// bits away, as only the nodes strictly below this point are in the range.
+	d := bits.Len64(xbegin^end) - 1
+	mask := uint64(1)<<uint(d) - 1
+	// The left part of the range consists of all nodes strictly below and to
+	// the right from the path to leaf #begin-1, corresponding to zero bits in
+	// the masked part of begin-1. Likewise, the right part corresponds to one
+	// bits in the masked part of end.
+	return ^xbegin & mask, end & mask
+}
diff --git a/vendor/github.com/transparency-dev/merkle/proof/proof.go b/vendor/github.com/transparency-dev/merkle/proof/proof.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/proof/proof.go
+// Copyright 2022 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package proof contains helpers for constructing log Merkle tree proofs.
+package proof
+
+import (
+	"fmt"
+	"math/bits"
+
+	"github.com/transparency-dev/merkle/compact"
+)
+
+// Nodes contains information on how to construct a log Merkle tree proof. It
+// supports any proof that has at most one ephemeral node, such as inclusion
+// and consistency proofs defined in RFC 6962.
+type Nodes struct {
+	// IDs contains the IDs of non-ephemeral nodes sufficient to build the
+	// proof. If an ephemeral node is needed for a proof, it can be recomputed
+	// based on a subset of nodes in this list.
+	IDs []compact.NodeID
+	// begin is the beginning index (inclusive) into IDs of the nodes that
+	// constitute the ephemeral node.
+	begin int
+	// end is the ending index (exclusive) into IDs of the nodes that
+	// constitute the ephemeral node.
+	end int
+	// ephem is the ID of the ephemeral node in the proof. This node is a
+	// common ancestor of all nodes in IDs[begin:end]. It is the node that
+	// otherwise would have been used in the proof if the tree was perfect.
+	ephem compact.NodeID
+}
+
+// Inclusion returns the information on how to fetch and construct an inclusion
+// proof for the given leaf in a log Merkle tree of the given size. It requires
+// 0 <= index < size.
+func Inclusion(index, size uint64) (Nodes, error) {
+	if index >= size {
+		return Nodes{}, fmt.Errorf("index %d out of bounds for tree size %d", index, size)
+	}
+	return nodes(index, 0, size).skipFirst(), nil
+}
+
+// Consistency returns the information on how to fetch and construct a
+// consistency proof between the two given tree sizes of a log Merkle tree. It
+// requires 0 <= size1 <= size2.
+func Consistency(size1, size2 uint64) (Nodes, error) {
+	if size1 > size2 {
+		return Nodes{}, fmt.Errorf("tree size %d > %d", size1, size2)
+	}
+	if size1 == size2 || size1 == 0 {
+		return Nodes{IDs: []compact.NodeID{}}, nil
+	}
+
+	// Find the root of the biggest perfect subtree that ends at size1.
+	level := uint(bits.TrailingZeros64(size1))
+	index := (size1 - 1) >> level
+	// The consistency proof consists of this node (except if size1 is a power of
+	// two, in which case adding this node would be redundant because the client
+	// is assumed to know it from a checkpoint), and nodes of the inclusion proof
+	// into this node in the tree of size2.
+	p := nodes(index, level, size2)
+
+	// Handle the case when size1 is a power of 2.
+	if index == 0 {
+		return p.skipFirst(), nil
+	}
+	return p, nil
+}
+
+// nodes returns the node IDs necessary to prove that the (level, index) node
+// is included in the Merkle tree of the given size.
+func nodes(index uint64, level uint, size uint64) Nodes {
+	// Compute the `fork` node, where the path from root to (level, index) node
+	// diverges from the path to (0, size).
+	//
+	// The sibling of this node is the ephemeral node which represents a subtree
+	// that is not complete in the tree of the given size. To compute the hash
+	// of the ephemeral node, we need all the non-ephemeral nodes that cover the
+	// same range of leaves.
+	//
+	// The `inner` variable is how many layers up from (level, index) the `fork`
+	// and the ephemeral nodes are.
+	inner := bits.Len64(index^(size>>level)) - 1
+	fork := compact.NewNodeID(level+uint(inner), index>>inner)
+
+	begin, end := fork.Coverage()
+	left := compact.RangeSize(0, begin)
+	right := compact.RangeSize(end, size)
+
+	node := compact.NewNodeID(level, index)
+	// Pre-allocate the exact number of nodes for the proof, in order:
+	// - The seed node for which we are building the proof.
+	// - The `inner` nodes at each level up to the fork node.
+	// - The `right` nodes, comprising the ephemeral node.
+	// - The `left` nodes, completing the coverage of the whole [0, size) range.
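+	//
+	// Worked illustration (editor's note, not in the upstream source): for an
+	// inclusion proof of leaf #2 in a tree of size 5, inner=2 and fork is the
+	// node at (level 2, index 0) covering leaves [0, 4). The right range [4, 5)
+	// contributes one node and the left range [0, 0) none, so after skipFirst
+	// the proof IDs are (0,3), (1,0), (0,4).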
+ nodes := append(make([]compact.NodeID, 0, 1+inner+right+left), node) + + // The first portion of the proof consists of the siblings for nodes of the + // path going up to the level at which the ephemeral node appears. + for ; node.Level < fork.Level; node = node.Parent() { + nodes = append(nodes, node.Sibling()) + } + // This portion of the proof covers the range [begin, end) under it. The + // ranges to the left and to the right from it remain to be covered. + + // Add all the nodes (potentially none) that cover the right range, and + // represent the ephemeral node. Reverse them so that the Rehash method can + // process hashes in the convenient order, from lower to upper levels. + len1 := len(nodes) + nodes = compact.RangeNodes(end, size, nodes) + reverse(nodes[len(nodes)-right:]) + len2 := len(nodes) + // Add the nodes that cover the left range, ordered increasingly by level. + nodes = compact.RangeNodes(0, begin, nodes) + reverse(nodes[len(nodes)-left:]) + + // nodes[len1:len2] contains the nodes representing the ephemeral node. If + // it's empty, make it zero. Note that it can also contain a single node. + // Depending on the preference of the layer above, it may or may not be + // considered ephemeral. + if len1 >= len2 { + len1, len2 = 0, 0 + } + + return Nodes{IDs: nodes, begin: len1, end: len2, ephem: fork.Sibling()} +} + +// Ephem returns the ephemeral node, and indices begin and end, such that +// IDs[begin:end] slice contains the child nodes of the ephemeral node. +// +// The list is empty iff there are no ephemeral nodes in the proof. Some +// examples of when this can happen: a proof in a perfect tree; an inclusion +// proof for a leaf in a perfect subtree at the right edge of the tree. +func (n Nodes) Ephem() (compact.NodeID, int, int) { + return n.ephem, n.begin, n.end +} + +// Rehash computes the proof based on the slice of node hashes corresponding to +// their IDs in the n.IDs field. The slices must be of the same length. The hc +// parameter computes a node's hash based on hashes of its children. +// +// Warning: The passed-in slice of hashes can be modified in-place. +func (n Nodes) Rehash(h [][]byte, hc func(left, right []byte) []byte) ([][]byte, error) { + if got, want := len(h), len(n.IDs); got != want { + return nil, fmt.Errorf("got %d hashes but expected %d", got, want) + } + cursor := 0 + // Scan the list of node hashes, and store the rehashed list in-place. + // Invariant: cursor <= i, and h[:cursor] contains all the hashes of the + // rehashed list after scanning h up to index i-1. + for i, ln := 0, len(h); i < ln; i, cursor = i+1, cursor+1 { + hash := h[i] + if i >= n.begin && i < n.end { + // Scan the block of node hashes that need rehashing. + for i++; i < n.end; i++ { + hash = hc(h[i], hash) + } + i-- + } + h[cursor] = hash + } + return h[:cursor], nil +} + +func (n Nodes) skipFirst() Nodes { + n.IDs = n.IDs[1:] + // Fixup the indices into the IDs slice. + if n.begin < n.end { + n.begin-- + n.end-- + } + return n +} + +func reverse(ids []compact.NodeID) { + for i, j := 0, len(ids)-1; i < j; i, j = i+1, j-1 { + ids[i], ids[j] = ids[j], ids[i] + } +} diff --git a/vendor/github.com/transparency-dev/merkle/proof/verify.go b/vendor/github.com/transparency-dev/merkle/proof/verify.go new file mode 100644 index 00000000000..d42e1afe36f --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/proof/verify.go @@ -0,0 +1,176 @@ +// Copyright 2017 Google LLC. All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proof
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math/bits"
+
+	"github.com/transparency-dev/merkle"
+)
+
+// RootMismatchError occurs when an inclusion proof fails.
+type RootMismatchError struct {
+	ExpectedRoot   []byte
+	CalculatedRoot []byte
+}
+
+func (e RootMismatchError) Error() string {
+	return fmt.Sprintf("calculated root:\n%v\n does not match expected root:\n%v", e.CalculatedRoot, e.ExpectedRoot)
+}
+
+func verifyMatch(calculated, expected []byte) error {
+	if !bytes.Equal(calculated, expected) {
+		return RootMismatchError{ExpectedRoot: expected, CalculatedRoot: calculated}
+	}
+	return nil
+}
+
+// VerifyInclusion verifies the correctness of the inclusion proof for the leaf
+// with the specified hash and index, relative to the tree of the given size
+// and root hash. Requires 0 <= index < size.
+func VerifyInclusion(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte, root []byte) error {
+	calcRoot, err := RootFromInclusionProof(hasher, index, size, leafHash, proof)
+	if err != nil {
+		return err
+	}
+	return verifyMatch(calcRoot, root)
+}
+
+// RootFromInclusionProof calculates the expected root hash for a tree of the
+// given size, provided a leaf index and hash with the corresponding inclusion
+// proof. Requires 0 <= index < size.
+func RootFromInclusionProof(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte) ([]byte, error) {
+	if index >= size {
+		return nil, fmt.Errorf("index is beyond size: %d >= %d", index, size)
+	}
+	if got, want := len(leafHash), hasher.Size(); got != want {
+		return nil, fmt.Errorf("leafHash has unexpected size %d, want %d", got, want)
+	}
+
+	inner, border := decompInclProof(index, size)
+	if got, want := len(proof), inner+border; got != want {
+		return nil, fmt.Errorf("wrong proof size %d, want %d", got, want)
+	}
+
+	res := chainInner(hasher, leafHash, proof[:inner], index)
+	res = chainBorderRight(hasher, res, proof[inner:])
+	return res, nil
+}
+
+// VerifyConsistency checks that the passed-in consistency proof is valid
+// between the passed-in tree sizes, with respect to the corresponding root
+// hashes. Requires 0 <= size1 <= size2.
+func VerifyConsistency(hasher merkle.LogHasher, size1, size2 uint64, proof [][]byte, root1, root2 []byte) error {
+	switch {
+	case size2 < size1:
+		return fmt.Errorf("size2 (%d) < size1 (%d)", size2, size1)
+	case size1 == size2:
+		if len(proof) > 0 {
+			return errors.New("size1=size2, but proof is not empty")
+		}
+		return verifyMatch(root1, root2)
+	case size1 == 0:
+		// Any size greater than 0 is consistent with size 0.
+		if len(proof) > 0 {
+			return fmt.Errorf("expected empty proof, but got %d components", len(proof))
+		}
+		return nil // Proof OK.
+	case len(proof) == 0:
+		return errors.New("empty proof")
+	}
+
+	inner, border := decompInclProof(size1-1, size2)
+	shift := bits.TrailingZeros64(size1)
+	inner -= shift // Note: shift < inner if size1 < size2.
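+
+	// Worked illustration (editor's note, not in the upstream source): for
+	// size1=4 and size2=7, shift=2, inner=1 and border=0. size1 is a perfect
+	// subtree, so the seed below is root1 itself, the proof is the single hash
+	// covering leaves [4, 7), and root2 must equal H(root1, proof[0]).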
+
+	// The proof includes the root hash for the sub-tree of size 2^shift.
+	seed, start := proof[0], 1
+	if size1 == 1<<uint(shift) { // Unless size1 is that very 2^shift.
+		seed, start = root1, 0
+	}
+	if got, want := len(proof), start+inner+border; got != want {
+		return fmt.Errorf("wrong proof size %d, want %d", got, want)
+	}
+	proof = proof[start:]
+
+	mask := (size1 - 1) >> uint(shift) // Start chaining from level |shift|.
+	hash1 := chainInnerRight(hasher, seed, proof[:inner], mask)
+	hash1 = chainBorderRight(hasher, hash1, proof[inner:])
+	if err := verifyMatch(hash1, root1); err != nil {
+		return err
+	}
+
+	// Verify the second root.
+	hash2 := chainInner(hasher, seed, proof[:inner], mask)
+	hash2 = chainBorderRight(hasher, hash2, proof[inner:])
+	return verifyMatch(hash2, root2)
+}
+
+// decompInclProof breaks down inclusion proof for a leaf at the specified
+// |index| in a tree of the specified |size| into 2 components. The splitting
+// point between them is where paths to leaves |index| and |size-1| diverge.
+// Returns lengths of the bottom and upper proof parts correspondingly. The sum
+// of the two determines the correct length of the inclusion proof.
+func decompInclProof(index, size uint64) (int, int) {
+	inner := innerProofSize(index, size)
+	border := bits.OnesCount64(index >> uint(inner))
+	return inner, border
+}
+
+func innerProofSize(index, size uint64) int {
+	return bits.Len64(index ^ (size - 1))
+}
+
+// chainInner computes a subtree hash for a node on or below the tree's right
+// border. Assumes |proof| hashes are ordered from lower levels to upper, and
+// |seed| is the initial subtree/leaf hash on the path located at the specified
+// |index| on its level.
+func chainInner(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte {
+	for i, h := range proof {
+		if (index>>uint(i))&1 == 0 {
+			seed = hasher.HashChildren(seed, h)
+		} else {
+			seed = hasher.HashChildren(h, seed)
+		}
+	}
+	return seed
+}
+
+// chainInnerRight computes a subtree hash like chainInner, but only takes
+// hashes to the left from the path into consideration, which effectively means
+// the result is a hash of the corresponding earlier version of this subtree.
+func chainInnerRight(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte {
+	for i, h := range proof {
+		if (index>>uint(i))&1 == 1 {
+			seed = hasher.HashChildren(h, seed)
+		}
+	}
+	return seed
+}
+
+// chainBorderRight chains proof hashes along tree borders. This differs from
+// inner chaining because |proof| contains only left-side subtree hashes.
+func chainBorderRight(hasher merkle.LogHasher, seed []byte, proof [][]byte) []byte {
+	for _, h := range proof {
+		seed = hasher.HashChildren(h, seed)
+	}
+	return seed
+}
diff --git a/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go b/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go
new file mode 100644
index 00000000000..b04f952ef85
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go
@@ -0,0 +1,68 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rfc6962 provides hashing functionality according to RFC6962.
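+//
+// In brief (editor's summary, not part of the vendored file): leaf hashes are
+// computed as SHA-256(0x00 || leaf) and interior node hashes as
+// SHA-256(0x01 || left || right); the one-byte prefixes provide domain
+// separation between leaves and internal nodes. For example:
+//
+//	h := rfc6962.DefaultHasher
+//	leaf := h.HashLeaf([]byte("L123456"))  // SHA-256(0x00 || "L123456")
+//	root := h.HashChildren(leaf, leaf)     // SHA-256(0x01 || leaf || leaf)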
+package rfc6962
+
+import (
+	"crypto"
+	_ "crypto/sha256" // SHA256 is the default algorithm.
+)
+
+// Domain separation prefixes.
+const (
+	RFC6962LeafHashPrefix = 0
+	RFC6962NodeHashPrefix = 1
+)
+
+// DefaultHasher is a SHA256 based LogHasher.
+var DefaultHasher = New(crypto.SHA256)
+
+// Hasher implements the RFC6962 tree hashing algorithm.
+type Hasher struct {
+	crypto.Hash
+}
+
+// New creates a new Hasher based on the passed-in hash function.
+func New(h crypto.Hash) *Hasher {
+	return &Hasher{Hash: h}
+}
+
+// EmptyRoot returns a special case for an empty tree.
+func (t *Hasher) EmptyRoot() []byte {
+	return t.New().Sum(nil)
+}
+
+// HashLeaf returns the Merkle tree leaf hash of the data passed in through leaf.
+// The data in leaf is prefixed by the LeafHashPrefix.
+func (t *Hasher) HashLeaf(leaf []byte) []byte {
+	h := t.New()
+	h.Write([]byte{RFC6962LeafHashPrefix})
+	h.Write(leaf)
+	return h.Sum(nil)
+}
+
+// HashChildren returns the inner Merkle tree node hash of the two child nodes l and r.
+// The hashed structure is NodeHashPrefix||l||r.
+func (t *Hasher) HashChildren(l, r []byte) []byte {
+	h := t.New()
+	b := append(append(append(
+		make([]byte, 0, 1+len(l)+len(r)),
+		RFC6962NodeHashPrefix),
+		l...),
+		r...)
+
+	h.Write(b)
+	return h.Sum(nil)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/LICENSE b/vendor/go.mongodb.org/mongo-driver/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship.
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go new file mode 100644 index 00000000000..a0d81858261 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bson.go @@ -0,0 +1,50 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer +// See THIRD-PARTY-NOTICES for original license terms. + +package bson // import "go.mongodb.org/mongo-driver/bson" + +import ( + "go.mongodb.org/mongo-driver/bson/primitive" +) + +// Zeroer allows custom struct types to implement a report of zero +// state. All struct types that don't implement Zeroer or where IsZero +// returns false are considered to be not zero. 
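+//
+// Hypothetical illustration (editor's note, not part of the vendored file):
+//
+//	type Window struct{ Begin, End int64 }
+//
+//	// IsZero reports the window as empty when it covers no range.
+//	func (w Window) IsZero() bool { return w.End <= w.Begin }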
+type Zeroer interface { + IsZero() bool +} + +// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters, +// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead. +// +// A D should not be constructed with duplicate key names, as that can cause undefined server behavior. +// +// Example usage: +// +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +type D = primitive.D + +// E represents a BSON element for a D. It is usually used inside a D. +type E = primitive.E + +// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not +// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be +// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead. +// +// Example usage: +// +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +type M = primitive.M + +// An A is an ordered representation of a BSON array. +// +// Example usage: +// +// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} +type A = primitive.A diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go new file mode 100644 index 00000000000..652aa48b853 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go @@ -0,0 +1,55 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +// ArrayCodec is the Codec used for bsoncore.Array values. +// +// Deprecated: ArrayCodec will not be directly accessible in Go Driver 2.0. +type ArrayCodec struct{} + +var defaultArrayCodec = NewArrayCodec() + +// NewArrayCodec returns an ArrayCodec. +// +// Deprecated: NewArrayCodec will not be available in Go Driver 2.0. See +// [ArrayCodec] for more details. +func NewArrayCodec() *ArrayCodec { + return &ArrayCodec{} +} + +// EncodeValue is the ValueEncoder for bsoncore.Array values. +func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tCoreArray { + return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val} + } + + arr := val.Interface().(bsoncore.Array) + return bsonrw.Copier{}.CopyArrayFromBytes(vw, arr) +} + +// DecodeValue is the ValueDecoder for bsoncore.Array values. 
+func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCoreArray { + return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val} + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, 0)) + } + + val.SetLen(0) + arr, err := bsonrw.Copier{}.AppendArrayBytes(val.Interface().(bsoncore.Array), vr) + val.Set(reflect.ValueOf(arr)) + return err +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go new file mode 100644 index 00000000000..0693bd432fe --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go @@ -0,0 +1,382 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec // import "go.mongodb.org/mongo-driver/bson/bsoncodec" + +import ( + "fmt" + "reflect" + "strings" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +var ( + emptyValue = reflect.Value{} +) + +// Marshaler is an interface implemented by types that can marshal themselves +// into a BSON document represented as bytes. The bytes returned must be a valid +// BSON document if the error is nil. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead. +type Marshaler interface { + MarshalBSON() ([]byte, error) +} + +// ValueMarshaler is an interface implemented by types that can marshal +// themselves into a BSON value as bytes. The type must be the valid type for +// the bytes returned. The bytes and byte type together must be valid if the +// error is nil. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead. +type ValueMarshaler interface { + MarshalBSONValue() (bsontype.Type, []byte, error) +} + +// Unmarshaler is an interface implemented by types that can unmarshal a BSON +// document representation of themselves. The BSON bytes can be assumed to be +// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data +// after returning. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead. +type Unmarshaler interface { + UnmarshalBSON([]byte) error +} + +// ValueUnmarshaler is an interface implemented by types that can unmarshal a +// BSON value representation of themselves. The BSON bytes and type can be +// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it +// wishes to retain the data after returning. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead. +type ValueUnmarshaler interface { + UnmarshalBSONValue(bsontype.Type, []byte) error +} + +// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be +// encoded by the ValueEncoder. 
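+//
+// For instance (editor's note, not part of the vendored file), handing a
+// string value to the byte slice codec produces the message
+// "ByteSliceEncodeValue can only encode valid []uint8, but got string".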
+type ValueEncoderError struct { + Name string + Types []reflect.Type + Kinds []reflect.Kind + Received reflect.Value +} + +func (vee ValueEncoderError) Error() string { + typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds)) + for _, t := range vee.Types { + typeKinds = append(typeKinds, t.String()) + } + for _, k := range vee.Kinds { + if k == reflect.Map { + typeKinds = append(typeKinds, "map[string]*") + continue + } + typeKinds = append(typeKinds, k.String()) + } + received := vee.Received.Kind().String() + if vee.Received.IsValid() { + received = vee.Received.Type().String() + } + return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received) +} + +// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be +// decoded by the ValueDecoder. +type ValueDecoderError struct { + Name string + Types []reflect.Type + Kinds []reflect.Kind + Received reflect.Value +} + +func (vde ValueDecoderError) Error() string { + typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds)) + for _, t := range vde.Types { + typeKinds = append(typeKinds, t.String()) + } + for _, k := range vde.Kinds { + if k == reflect.Map { + typeKinds = append(typeKinds, "map[string]*") + continue + } + typeKinds = append(typeKinds, k.String()) + } + received := vde.Received.Kind().String() + if vde.Received.IsValid() { + received = vde.Received.Type().String() + } + return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received) +} + +// EncodeContext is the contextual information required for a Codec to encode a +// value. +type EncodeContext struct { + *Registry + + // MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, + // uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) + // that can represent the integer value. + // + // Deprecated: Use bson.Encoder.IntMinSize instead. + MinSize bool + + errorOnInlineDuplicates bool + stringifyMapKeysWithFmt bool + nilMapAsEmpty bool + nilSliceAsEmpty bool + nilByteSliceAsEmpty bool + omitZeroStruct bool + useJSONStructTags bool +} + +// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in +// the marshaled BSON when the "inline" struct tag option is set. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. +func (ec *EncodeContext) ErrorOnInlineDuplicates() { + ec.errorOnInlineDuplicates = true +} + +// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name +// strings using fmt.Sprintf() instead of the default string conversion logic. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. +func (ec *EncodeContext) StringifyMapKeysWithFmt() { + ec.stringifyMapKeysWithFmt = true +} + +// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON +// null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. +func (ec *EncodeContext) NilMapAsEmpty() { + ec.nilMapAsEmpty = true +} + +// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON +// null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. 
+func (ec *EncodeContext) NilSliceAsEmpty() {
+	ec.nilSliceAsEmpty = true
+}
+
+// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values
+// instead of BSON null.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead.
+func (ec *EncodeContext) NilByteSliceAsEmpty() {
+	ec.nilByteSliceAsEmpty = true
+}
+
+// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{})
+// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set.
+//
+// Note that the Encoder only examines exported struct fields when determining if a struct is the
+// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead.
+func (ec *EncodeContext) OmitZeroStruct() {
+	ec.omitZeroStruct = true
+}
+
+// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson"
+// struct tag is not specified.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead.
+func (ec *EncodeContext) UseJSONStructTags() {
+	ec.useJSONStructTags = true
+}
+
+// DecodeContext is the contextual information required for a Codec to decode a
+// value.
+type DecodeContext struct {
+	*Registry
+
+	// Truncate, if true, instructs decoders to truncate the fractional part of BSON "double"
+	// values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64,
+	// uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to
+	// BSON "decimal128" values.
+	//
+	// Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead.
+	Truncate bool
+
+	// Ancestor is the type of a containing document. This is mainly used to determine what type
+	// should be used when decoding an embedded document into an empty interface. For example, if
+	// Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface
+	// will be decoded into a bson.M.
+	//
+	// Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead.
+	Ancestor reflect.Type
+
+	// defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the
+	// usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is
+	// set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshaling will result in an
+	// error. DocumentType overrides the Ancestor field.
+	defaultDocumentType reflect.Type
+
+	binaryAsSlice     bool
+	useJSONStructTags bool
+	useLocalTimeZone  bool
+	zeroMaps          bool
+	zeroStructs       bool
+}
+
+// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or
+// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead.
+func (dc *DecodeContext) BinaryAsSlice() {
+	dc.binaryAsSlice = true
+}
+
+// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson"
+// struct tag is not specified.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead.
+func (dc *DecodeContext) UseJSONStructTags() {
+	dc.useJSONStructTags = true
+}
+
+// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead
+// of the UTC timezone.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead.
+func (dc *DecodeContext) UseLocalTimeZone() {
+	dc.useLocalTimeZone = true
+}
+
+// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value
+// passed to Decode before unmarshaling BSON documents into them.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead.
+func (dc *DecodeContext) ZeroMaps() {
+	dc.zeroMaps = true
+}
+
+// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination
+// value passed to Decode before unmarshaling BSON documents into them.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
+func (dc *DecodeContext) ZeroStructs() {
+	dc.zeroStructs = true
+}
+
+// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This
+// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead.
+func (dc *DecodeContext) DefaultDocumentM() {
+	dc.defaultDocumentType = reflect.TypeOf(primitive.M{})
+}
+
+// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This
+// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead.
+func (dc *DecodeContext) DefaultDocumentD() {
+	dc.defaultDocumentType = reflect.TypeOf(primitive.D{})
+}
+
+// ValueCodec is an interface for encoding and decoding reflect.Value values.
+//
+// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead.
+type ValueCodec interface {
+	ValueEncoder
+	ValueDecoder
+}
+
+// ValueEncoder is the interface implemented by types that can handle the encoding of a value.
+type ValueEncoder interface {
+	EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error
+}
+
+// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be
+// used as a ValueEncoder.
+type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error
+
+// EncodeValue implements the ValueEncoder interface.
+func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	return fn(ec, vw, val)
+}
+
+// ValueDecoder is the interface implemented by types that can handle the decoding of a value.
+type ValueDecoder interface {
+	DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error
+}
+
+// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be
+// used as a ValueDecoder.
+type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error
+
+// DecodeValue implements the ValueDecoder interface.
+func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	return fn(dc, vr, val)
+}
+
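A minimal sketch (editor's illustration, not part of the vendored file) of how
a ValueEncoderFunc plugs into a registry builder; the temperature type, the
registerTemperature helper and the string format are invented, and imports of
reflect, strconv, bsoncodec and bsonrw are assumed:

	// temperature is a hypothetical custom type encoded as a BSON string.
	type temperature float64

	func registerTemperature(rb *bsoncodec.RegistryBuilder) {
		tTemp := reflect.TypeOf(temperature(0))
		rb.RegisterTypeEncoder(tTemp, bsoncodec.ValueEncoderFunc(
			func(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
				if !val.IsValid() || val.Type() != tTemp {
					return bsoncodec.ValueEncoderError{Name: "temperatureEncodeValue", Types: []reflect.Type{tTemp}, Received: val}
				}
				return vw.WriteString(strconv.FormatFloat(val.Float(), 'f', 1, 64))
			}))
	}

+// typeDecoder is the interface implemented by types that can handle the decoding of a value given its type.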
+type typeDecoder interface {
+	decodeType(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error)
+}
+
+// typeDecoderFunc is an adapter function that allows a function with the correct signature to be used as a typeDecoder.
+type typeDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error)
+
+func (fn typeDecoderFunc) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	return fn(dc, vr, t)
+}
+
+// decodeAdapter allows two functions with the correct signatures to be used as both a ValueDecoder and typeDecoder.
+type decodeAdapter struct {
+	ValueDecoderFunc
+	typeDecoderFunc
+}
+
+var _ ValueDecoder = decodeAdapter{}
+var _ typeDecoder = decodeAdapter{}
+
+// decodeTypeOrValue calls decoder.decodeType if decoder is a typeDecoder. Otherwise, it allocates a new element of type
+// t and calls decoder.DecodeValue on it.
+func decodeTypeOrValue(decoder ValueDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	td, _ := decoder.(typeDecoder)
+	return decodeTypeOrValueWithInfo(decoder, td, dc, vr, t, true)
+}
+
+func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type, convert bool) (reflect.Value, error) {
+	if td != nil {
+		val, err := td.decodeType(dc, vr, t)
+		if err == nil && convert && val.Type() != t {
+			// This conversion step is necessary for slices and maps. If a user declares variables like:
+			//
+			// type myBool bool
+			// var m map[string]myBool
+			//
+			// and tries to decode BSON bytes into the map, the decoding will fail if this conversion is not present
+			// because we'll try to assign a value of type bool to one of type myBool.
+			val = val.Convert(t)
+		}
+		return val, err
+	}
+
+	val := reflect.New(t).Elem()
+	err := vd.DecodeValue(dc, vr, val)
+	return val, err
+}
+
+// CodecZeroer is the interface implemented by Codecs that can also determine if
+// a value of the type that would be encoded is zero.
+//
+// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver
+// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to
+// nil instead.
+type CodecZeroer interface {
+	IsTypeZero(interface{}) bool
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
new file mode 100644
index 00000000000..0134b5a94be
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
@@ -0,0 +1,138 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"fmt"
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// ByteSliceCodec is the Codec used for []byte values.
+//
+// Deprecated: ByteSliceCodec will not be directly configurable in Go Driver
+// 2.0. To configure the byte slice encode and decode behavior, use the
+// configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder].
To configure the byte slice +// encode and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to encode nil byte slices as empty +// BSON binary values, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilByteSliceAsEmpty: true, +// }) +// +// See the deprecation notice for each field in ByteSliceCodec for the +// corresponding settings. +type ByteSliceCodec struct { + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values + // instead of BSON null. + // + // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty or options.BSONOptions.NilByteSliceAsEmpty + // instead. + EncodeNilAsEmpty bool +} + +var ( + defaultByteSliceCodec = NewByteSliceCodec() + + // Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be + // used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. + _ typeDecoder = defaultByteSliceCodec +) + +// NewByteSliceCodec returns a ByteSliceCodec with options opts. +// +// Deprecated: NewByteSliceCodec will not be available in Go Driver 2.0. See +// [ByteSliceCodec] for more details. +func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { + byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) + codec := ByteSliceCodec{} + if byteSliceOpt.EncodeNilAsEmpty != nil { + codec.EncodeNilAsEmpty = *byteSliceOpt.EncodeNilAsEmpty + } + return &codec +} + +// EncodeValue is the ValueEncoder for []byte. +func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tByteSlice { + return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty { + return vw.WriteNull() + } + return vw.WriteBinary(val.Interface().([]byte)) +} + +func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tByteSlice { + return emptyValue, ValueDecoderError{ + Name: "ByteSliceDecodeValue", + Types: []reflect.Type{tByteSlice}, + Received: reflect.Zero(t), + } + } + + var data []byte + var err error + switch vrType := vr.Type(); vrType { + case bsontype.String: + str, err := vr.ReadString() + if err != nil { + return emptyValue, err + } + data = []byte(str) + case bsontype.Symbol: + sym, err := vr.ReadSymbol() + if err != nil { + return emptyValue, err + } + data = []byte(sym) + case bsontype.Binary: + var subtype byte + data, subtype, err = vr.ReadBinary() + if err != nil { + return emptyValue, err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return emptyValue, decodeBinaryError{subtype: subtype, typeName: "[]byte"} + } + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a []byte", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(data), nil +} + +// DecodeValue is the ValueDecoder for []byte. 
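+// It accepts BSON binary (generic or old subtype), string, symbol, null and
+// undefined values, per decodeType above (editor's note, not in the vendored
+// file).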
+func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tByteSlice { + return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + + elem, err := bsc.decodeType(dc, vr, tByteSlice) + if err != nil { + return err + } + + val.Set(elem) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go new file mode 100644 index 00000000000..844b50299f2 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go @@ -0,0 +1,166 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "sync" + "sync/atomic" +) + +// Runtime check that the kind encoder and decoder caches can store any valid +// reflect.Kind constant. +func init() { + if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" { + panic("The capacity of kindEncoderCache is too small.\n" + + "This is due to a new type being added to reflect.Kind.") + } +} + +// statically assert array size +var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer] +var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer] + +type typeEncoderCache struct { + cache sync.Map // map[reflect.Type]ValueEncoder +} + +func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) { + c.cache.Store(rt, enc) +} + +func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueEncoder), true + } + return nil, false +} + +func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder { + if v, loaded := c.cache.LoadOrStore(rt, enc); loaded { + enc = v.(ValueEncoder) + } + return enc +} + +func (c *typeEncoderCache) Clone() *typeEncoderCache { + cc := new(typeEncoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +type typeDecoderCache struct { + cache sync.Map // map[reflect.Type]ValueDecoder +} + +func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) { + c.cache.Store(rt, dec) +} + +func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueDecoder), true + } + return nil, false +} + +func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder { + if v, loaded := c.cache.LoadOrStore(rt, dec); loaded { + dec = v.(ValueDecoder) + } + return dec +} + +func (c *typeDecoderCache) Clone() *typeDecoderCache { + cc := new(typeDecoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +// atomic.Value requires that all calls to Store() have the same concrete type +// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type +// is always the same (since different concrete types may implement the +// ValueEncoder interface). 
+type kindEncoderCacheEntry struct {
+	enc ValueEncoder
+}
+
+type kindEncoderCache struct {
+	entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry
+}
+
+func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) {
+	if enc != nil && rt < reflect.Kind(len(c.entries)) {
+		c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc})
+	}
+}
+
+func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) {
+	if rt < reflect.Kind(len(c.entries)) {
+		if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok {
+			return ent.enc, ent.enc != nil
+		}
+	}
+	return nil, false
+}
+
+func (c *kindEncoderCache) Clone() *kindEncoderCache {
+	cc := new(kindEncoderCache)
+	for i, v := range c.entries {
+		if val := v.Load(); val != nil {
+			cc.entries[i].Store(val)
+		}
+	}
+	return cc
+}
+
+// atomic.Value requires that all calls to Store() have the same concrete type
+// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type
+// is always the same (since different concrete types may implement the
+// ValueDecoder interface).
+type kindDecoderCacheEntry struct {
+	dec ValueDecoder
+}
+
+type kindDecoderCache struct {
+	entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry
+}
+
+func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) {
+	if rt < reflect.Kind(len(c.entries)) {
+		c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec})
+	}
+}
+
+func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) {
+	if rt < reflect.Kind(len(c.entries)) {
+		if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok {
+			return ent.dec, ent.dec != nil
+		}
+	}
+	return nil, false
+}
+
+func (c *kindDecoderCache) Clone() *kindDecoderCache {
+	cc := new(kindDecoderCache)
+	for i, v := range c.entries {
+		if val := v.Load(); val != nil {
+			cc.entries[i].Store(val)
+		}
+	}
+	return cc
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go
new file mode 100644
index 00000000000..cb8180f25cc
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go
@@ -0,0 +1,63 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+)
+
+// condAddrEncoder is the encoder used when a pointer to the encoding value has an encoder.
+type condAddrEncoder struct {
+	canAddrEnc ValueEncoder
+	elseEnc    ValueEncoder
+}
+
+var _ ValueEncoder = (*condAddrEncoder)(nil)
+
+// newCondAddrEncoder returns a condAddrEncoder.
+func newCondAddrEncoder(canAddrEnc, elseEnc ValueEncoder) *condAddrEncoder {
+	encoder := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+	return &encoder
+}
+
+// EncodeValue is the ValueEncoderFunc for a value that may be addressable.
+func (cae *condAddrEncoder) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.CanAddr() {
+		return cae.canAddrEnc.EncodeValue(ec, vw, val)
+	}
+	if cae.elseEnc != nil {
+		return cae.elseEnc.EncodeValue(ec, vw, val)
+	}
+	return ErrNoEncoder{Type: val.Type()}
+}
+
+// condAddrDecoder is the decoder used when a pointer to the value has a decoder.
+type condAddrDecoder struct {
+	canAddrDec ValueDecoder
+	elseDec    ValueDecoder
+}
+
+var _ ValueDecoder = (*condAddrDecoder)(nil)
+
+// newCondAddrDecoder returns a condAddrDecoder.
+func newCondAddrDecoder(canAddrDec, elseDec ValueDecoder) *condAddrDecoder {
+	decoder := condAddrDecoder{canAddrDec: canAddrDec, elseDec: elseDec}
+	return &decoder
+}
+
+// DecodeValue is the ValueDecoderFunc for a value that may be addressable.
+func (cad *condAddrDecoder) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if val.CanAddr() {
+		return cad.canAddrDec.DecodeValue(dc, vr, val)
+	}
+	if cad.elseDec != nil {
+		return cad.elseDec.DecodeValue(dc, vr, val)
+	}
+	return ErrNoDecoder{Type: val.Type()}
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
new file mode 100644
index 00000000000..8702d6d39e0
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
@@ -0,0 +1,1819 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"strconv"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+var (
+	defaultValueDecoders DefaultValueDecoders
+	errCannotTruncate    = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled")
+)
+
+type decodeBinaryError struct {
+	subtype  byte
+	typeName string
+}
+
+func (d decodeBinaryError) Error() string {
+	return fmt.Sprintf("only binary values with subtype 0x00 or 0x02 can be decoded into %s, but got subtype %v", d.typeName, d.subtype)
+}
+
+func newDefaultStructCodec() *StructCodec {
+	codec, err := NewStructCodec(DefaultStructTagParser)
+	if err != nil {
+		// This function is called from the codec registration path, so errors can't be propagated. If there's an error
+		// constructing the StructCodec, we panic to avoid losing it.
+		panic(fmt.Errorf("error creating default StructCodec: %w", err))
+	}
+	return codec
+}
+
+// DefaultValueDecoders is a namespace type for the default ValueDecoders used
+// when creating a registry.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+type DefaultValueDecoders struct{}
+
+// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with
+// the provided RegistryBuilder.
+//
+// There is no support for decoding map[string]interface{} because there is no decoder for
+// interface{}, so users must either register this decoder themselves or use the
+// EmptyInterfaceDecoder available in the bson package.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
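Every Deprecated note in this file points at bson.NewRegistry. For orientation, a sketch of that replacement path, assuming the v1 driver API this vendored tree provides:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// NewRegistry returns a registry with all of the default encoders and
	// decoders from this package already registered.
	reg := bson.NewRegistry()

	raw, err := bson.MarshalWithRegistry(reg, bson.D{{Key: "n", Value: int32(42)}})
	if err != nil {
		panic(err)
	}

	var out bson.D
	if err := bson.UnmarshalWithRegistry(reg, raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // [{n 42}]
}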
+func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { + if rb == nil { + panic(errors.New("argument to RegisterDefaultDecoders must not be nil")) + } + + intDecoder := decodeAdapter{dvd.IntDecodeValue, dvd.intDecodeType} + floatDecoder := decodeAdapter{dvd.FloatDecodeValue, dvd.floatDecodeType} + + rb. + RegisterTypeDecoder(tD, ValueDecoderFunc(dvd.DDecodeValue)). + RegisterTypeDecoder(tBinary, decodeAdapter{dvd.BinaryDecodeValue, dvd.binaryDecodeType}). + RegisterTypeDecoder(tUndefined, decodeAdapter{dvd.UndefinedDecodeValue, dvd.undefinedDecodeType}). + RegisterTypeDecoder(tDateTime, decodeAdapter{dvd.DateTimeDecodeValue, dvd.dateTimeDecodeType}). + RegisterTypeDecoder(tNull, decodeAdapter{dvd.NullDecodeValue, dvd.nullDecodeType}). + RegisterTypeDecoder(tRegex, decodeAdapter{dvd.RegexDecodeValue, dvd.regexDecodeType}). + RegisterTypeDecoder(tDBPointer, decodeAdapter{dvd.DBPointerDecodeValue, dvd.dBPointerDecodeType}). + RegisterTypeDecoder(tTimestamp, decodeAdapter{dvd.TimestampDecodeValue, dvd.timestampDecodeType}). + RegisterTypeDecoder(tMinKey, decodeAdapter{dvd.MinKeyDecodeValue, dvd.minKeyDecodeType}). + RegisterTypeDecoder(tMaxKey, decodeAdapter{dvd.MaxKeyDecodeValue, dvd.maxKeyDecodeType}). + RegisterTypeDecoder(tJavaScript, decodeAdapter{dvd.JavaScriptDecodeValue, dvd.javaScriptDecodeType}). + RegisterTypeDecoder(tSymbol, decodeAdapter{dvd.SymbolDecodeValue, dvd.symbolDecodeType}). + RegisterTypeDecoder(tByteSlice, defaultByteSliceCodec). + RegisterTypeDecoder(tTime, defaultTimeCodec). + RegisterTypeDecoder(tEmpty, defaultEmptyInterfaceCodec). + RegisterTypeDecoder(tCoreArray, defaultArrayCodec). + RegisterTypeDecoder(tOID, decodeAdapter{dvd.ObjectIDDecodeValue, dvd.objectIDDecodeType}). + RegisterTypeDecoder(tDecimal, decodeAdapter{dvd.Decimal128DecodeValue, dvd.decimal128DecodeType}). + RegisterTypeDecoder(tJSONNumber, decodeAdapter{dvd.JSONNumberDecodeValue, dvd.jsonNumberDecodeType}). + RegisterTypeDecoder(tURL, decodeAdapter{dvd.URLDecodeValue, dvd.urlDecodeType}). + RegisterTypeDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)). + RegisterTypeDecoder(tCodeWithScope, decodeAdapter{dvd.CodeWithScopeDecodeValue, dvd.codeWithScopeDecodeType}). + RegisterDefaultDecoder(reflect.Bool, decodeAdapter{dvd.BooleanDecodeValue, dvd.booleanDecodeType}). + RegisterDefaultDecoder(reflect.Int, intDecoder). + RegisterDefaultDecoder(reflect.Int8, intDecoder). + RegisterDefaultDecoder(reflect.Int16, intDecoder). + RegisterDefaultDecoder(reflect.Int32, intDecoder). + RegisterDefaultDecoder(reflect.Int64, intDecoder). + RegisterDefaultDecoder(reflect.Uint, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint8, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint16, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint32, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint64, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Float32, floatDecoder). + RegisterDefaultDecoder(reflect.Float64, floatDecoder). + RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)). + RegisterDefaultDecoder(reflect.Map, defaultMapCodec). + RegisterDefaultDecoder(reflect.Slice, defaultSliceCodec). + RegisterDefaultDecoder(reflect.String, defaultStringCodec). + RegisterDefaultDecoder(reflect.Struct, newDefaultStructCodec()). + RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()). + RegisterTypeMapEntry(bsontype.Double, tFloat64). + RegisterTypeMapEntry(bsontype.String, tString). + RegisterTypeMapEntry(bsontype.Array, tA). 
+ RegisterTypeMapEntry(bsontype.Binary, tBinary). + RegisterTypeMapEntry(bsontype.Undefined, tUndefined). + RegisterTypeMapEntry(bsontype.ObjectID, tOID). + RegisterTypeMapEntry(bsontype.Boolean, tBool). + RegisterTypeMapEntry(bsontype.DateTime, tDateTime). + RegisterTypeMapEntry(bsontype.Regex, tRegex). + RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer). + RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript). + RegisterTypeMapEntry(bsontype.Symbol, tSymbol). + RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope). + RegisterTypeMapEntry(bsontype.Int32, tInt32). + RegisterTypeMapEntry(bsontype.Int64, tInt64). + RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp). + RegisterTypeMapEntry(bsontype.Decimal128, tDecimal). + RegisterTypeMapEntry(bsontype.MinKey, tMinKey). + RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey). + RegisterTypeMapEntry(bsontype.Type(0), tD). + RegisterTypeMapEntry(bsontype.EmbeddedDocument, tD). + RegisterHookDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)). + RegisterHookDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue)) +} + +// DDecodeValue is the ValueDecoderFunc for primitive.D instances. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || !val.CanSet() || val.Type() != tD { + return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Type(0), bsontype.EmbeddedDocument: + dc.Ancestor = tD + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a primitive.D", vrType) + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + decoder, err := dc.LookupDecoder(tEmpty) + if err != nil { + return err + } + tEmptyTypeDecoder, _ := decoder.(typeDecoder) + + // Use the elements in the provided value if it's non nil. Otherwise, allocate a new D instance. + var elems primitive.D + if !val.IsNil() { + val.SetLen(0) + elems = val.Interface().(primitive.D) + } else { + elems = make(primitive.D, 0) + } + + for { + key, elemVr, err := dr.ReadElement() + if errors.Is(err, bsonrw.ErrEOD) { + break + } else if err != nil { + return err + } + + // Pass false for convert because we don't need to call reflect.Value.Convert for tEmpty. 
+ elem, err := decodeTypeOrValueWithInfo(decoder, tEmptyTypeDecoder, dc, elemVr, tEmpty, false) + if err != nil { + return err + } + + elems = append(elems, primitive.E{Key: key, Value: elem.Interface()}) + } + + val.Set(reflect.ValueOf(elems)) + return nil +} + +func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t.Kind() != reflect.Bool { + return emptyValue, ValueDecoderError{ + Name: "BooleanDecodeValue", + Kinds: []reflect.Kind{reflect.Bool}, + Received: reflect.Zero(t), + } + } + + var b bool + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + b = (i32 != 0) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return emptyValue, err + } + b = (i64 != 0) + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return emptyValue, err + } + b = (f64 != 0) + case bsontype.Boolean: + b, err = vr.ReadBoolean() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a boolean", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(b), nil +} + +// BooleanDecodeValue is the ValueDecoderFunc for bool types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { + return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} + } + + elem, err := dvd.booleanDecodeType(dctx, vr, val.Type()) + if err != nil { + return err + } + + val.SetBool(elem.Bool()) + return nil +} + +func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + var i64 int64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return emptyValue, err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return emptyValue, err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return emptyValue, errCannotTruncate + } + if f64 > float64(math.MaxInt64) { + return emptyValue, fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return emptyValue, err + } + if b { + i64 = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) + } + + switch t.Kind() { + case reflect.Int8: + if i64 < math.MinInt8 || i64 > math.MaxInt8 { + return emptyValue, fmt.Errorf("%d overflows int8", i64) + } + + return reflect.ValueOf(int8(i64)), nil + case reflect.Int16: + if i64 < math.MinInt16 || i64 > math.MaxInt16 { + return emptyValue, fmt.Errorf("%d overflows int16", i64) + } + + return reflect.ValueOf(int16(i64)), nil + case reflect.Int32: + if i64 < math.MinInt32 || i64 > math.MaxInt32 { + return 
emptyValue, fmt.Errorf("%d overflows int32", i64) + } + + return reflect.ValueOf(int32(i64)), nil + case reflect.Int64: + return reflect.ValueOf(i64), nil + case reflect.Int: + if i64 > math.MaxInt { // Can we fit this inside of an int + return emptyValue, fmt.Errorf("%d overflows int", i64) + } + + return reflect.ValueOf(int(i64)), nil + default: + return emptyValue, ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: reflect.Zero(t), + } + } +} + +// IntDecodeValue is the ValueDecoderFunc for int types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: val, + } + } + + elem, err := dvd.intDecodeType(dc, vr, val.Type()) + if err != nil { + return err + } + + val.SetInt(elem.Int()) + return nil +} + +// UintDecodeValue is the ValueDecoderFunc for uint types. +// +// Deprecated: UintDecodeValue is not registered by default. Use UintCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + var i64 int64 + var err error + switch vr.Type() { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled") + } + if f64 > float64(math.MaxInt64) { + return fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return err + } + if b { + i64 = 1 + } + default: + return fmt.Errorf("cannot decode %v into an integer type", vr.Type()) + } + + if !val.CanSet() { + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + switch val.Kind() { + case reflect.Uint8: + if i64 < 0 || i64 > math.MaxUint8 { + return fmt.Errorf("%d overflows uint8", i64) + } + case reflect.Uint16: + if i64 < 0 || i64 > math.MaxUint16 { + return fmt.Errorf("%d overflows uint16", i64) + } + case reflect.Uint32: + if i64 < 0 || i64 > math.MaxUint32 { + return fmt.Errorf("%d overflows uint32", i64) + } + case reflect.Uint64: + if i64 < 0 { + return fmt.Errorf("%d overflows uint64", i64) + } + case reflect.Uint: + if i64 < 0 || uint64(i64) > uint64(math.MaxUint) { // Can we fit this inside of an uint + return fmt.Errorf("%d overflows uint", i64) + } + default: + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + val.SetUint(uint64(i64)) + return nil +} + +func (dvd DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + var f float64 + var err error + switch vrType := vr.Type(); vrType { + case 
bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + f = float64(i32) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return emptyValue, err + } + f = float64(i64) + case bsontype.Double: + f, err = vr.ReadDouble() + if err != nil { + return emptyValue, err + } + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return emptyValue, err + } + if b { + f = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into a float32 or float64 type", vrType) + } + + switch t.Kind() { + case reflect.Float32: + if !dc.Truncate && float64(float32(f)) != f { + return emptyValue, errCannotTruncate + } + + return reflect.ValueOf(float32(f)), nil + case reflect.Float64: + return reflect.ValueOf(f), nil + default: + return emptyValue, ValueDecoderError{ + Name: "FloatDecodeValue", + Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, + Received: reflect.Zero(t), + } + } +} + +// FloatDecodeValue is the ValueDecoderFunc for float types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "FloatDecodeValue", + Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, + Received: val, + } + } + + elem, err := dvd.floatDecodeType(ec, vr, val.Type()) + if err != nil { + return err + } + + val.SetFloat(elem.Float()) + return nil +} + +// StringDecodeValue is the ValueDecoderFunc for string types. +// +// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + var str string + var err error + switch vr.Type() { + // TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed. + case bsontype.String: + str, err = vr.ReadString() + if err != nil { + return err + } + default: + return fmt.Errorf("cannot decode %v into a string type", vr.Type()) + } + if !val.CanSet() || val.Kind() != reflect.String { + return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} + } + + val.SetString(str) + return nil +} + +func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tJavaScript { + return emptyValue, ValueDecoderError{ + Name: "JavaScriptDecodeValue", + Types: []reflect.Type{tJavaScript}, + Received: reflect.Zero(t), + } + } + + var js string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.JavaScript: + js, err = vr.ReadJavascript() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.JavaScript", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.JavaScript(js)), nil +} + +// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
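Both numeric helpers earlier in this file (intDecodeType and floatDecodeType) reject lossy conversions unless DecodeContext.Truncate is set. A sketch of the observable behavior, assuming the v1 driver's UnmarshalWithContext helper:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
)

type doc struct {
	N int `bson:"n"`
}

func main() {
	var d doc

	raw, _ := bson.Marshal(bson.D{{Key: "n", Value: 3.0}})
	// A double with no fractional part decodes into an int as-is.
	fmt.Println(bson.Unmarshal(raw, &d), d.N) // <nil> 3

	raw, _ = bson.Marshal(bson.D{{Key: "n", Value: 3.7}})
	// A fractional double is rejected by default...
	fmt.Println(bson.Unmarshal(raw, &d) != nil) // true

	// ...and truncated toward zero once Truncate is enabled.
	dc := bsoncodec.DecodeContext{Registry: bson.DefaultRegistry, Truncate: true}
	fmt.Println(bson.UnmarshalWithContext(dc, raw, &d), d.N) // <nil> 3
}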
+func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tJavaScript { + return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} + } + + elem, err := dvd.javaScriptDecodeType(dctx, vr, tJavaScript) + if err != nil { + return err + } + + val.SetString(elem.String()) + return nil +} + +func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tSymbol { + return emptyValue, ValueDecoderError{ + Name: "SymbolDecodeValue", + Types: []reflect.Type{tSymbol}, + Received: reflect.Zero(t), + } + } + + var symbol string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.String: + symbol, err = vr.ReadString() + case bsontype.Symbol: + symbol, err = vr.ReadSymbol() + case bsontype.Binary: + data, subtype, err := vr.ReadBinary() + if err != nil { + return emptyValue, err + } + + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return emptyValue, decodeBinaryError{subtype: subtype, typeName: "primitive.Symbol"} + } + symbol = string(data) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Symbol", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Symbol(symbol)), nil +} + +// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tSymbol { + return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} + } + + elem, err := dvd.symbolDecodeType(dctx, vr, tSymbol) + if err != nil { + return err + } + + val.SetString(elem.String()) + return nil +} + +func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tBinary { + return emptyValue, ValueDecoderError{ + Name: "BinaryDecodeValue", + Types: []reflect.Type{tBinary}, + Received: reflect.Zero(t), + } + } + + var data []byte + var subtype byte + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Binary: + data, subtype, err = vr.ReadBinary() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Binary", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}), nil +} + +// BinaryDecodeValue is the ValueDecoderFunc for Binary. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
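binaryDecodeType above preserves both the payload and the subtype byte; round-tripping through primitive.Binary is the usual way to exercise it (a sketch):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	in := bson.D{{Key: "data", Value: primitive.Binary{Subtype: 0x00, Data: []byte("hi")}}}
	raw, err := bson.Marshal(in)
	if err != nil {
		panic(err)
	}

	var out struct {
		Data primitive.Binary `bson:"data"`
	}
	if err := bson.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Printf("subtype=%#x data=%q\n", out.Data.Subtype, out.Data.Data) // subtype=0x0 data="hi"
}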
+func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tBinary { + return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} + } + + elem, err := dvd.binaryDecodeType(dc, vr, tBinary) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tUndefined { + return emptyValue, ValueDecoderError{ + Name: "UndefinedDecodeValue", + Types: []reflect.Type{tUndefined}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Undefined: + err = vr.ReadUndefined() + case bsontype.Null: + err = vr.ReadNull() + default: + return emptyValue, fmt.Errorf("cannot decode %v into an Undefined", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Undefined{}), nil +} + +// UndefinedDecodeValue is the ValueDecoderFunc for Undefined. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tUndefined { + return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} + } + + elem, err := dvd.undefinedDecodeType(dc, vr, tUndefined) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// Accept both 12-byte string and pretty-printed 24-byte hex string formats. +func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tOID { + return emptyValue, ValueDecoderError{ + Name: "ObjectIDDecodeValue", + Types: []reflect.Type{tOID}, + Received: reflect.Zero(t), + } + } + + var oid primitive.ObjectID + var err error + switch vrType := vr.Type(); vrType { + case bsontype.ObjectID: + oid, err = vr.ReadObjectID() + if err != nil { + return emptyValue, err + } + case bsontype.String: + str, err := vr.ReadString() + if err != nil { + return emptyValue, err + } + if oid, err = primitive.ObjectIDFromHex(str); err == nil { + break + } + if len(str) != 12 { + return emptyValue, fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %v)", len(str)) + } + byteArr := []byte(str) + copy(oid[:], byteArr) + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into an ObjectID", vrType) + } + + return reflect.ValueOf(oid), nil +} + +// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
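objectIDDecodeType above accepts a BSON string in two shapes: the 24-character hex form and a raw 12-byte form whose bytes are copied into the ID verbatim. A sketch of both:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	// The 24-character hex form, as produced by ObjectID.Hex().
	oid, err := primitive.ObjectIDFromHex("5f1f3e5e8b9c4d2a1b3c4d5e")
	fmt.Println(oid.Hex(), err)

	// The raw 12-byte form: the decoder copies the string bytes directly.
	var raw primitive.ObjectID
	copy(raw[:], "abcdefghijkl") // exactly 12 bytes
	fmt.Println(raw.Hex())
}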
+func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tOID { + return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} + } + + elem, err := dvd.objectIDDecodeType(dc, vr, tOID) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tDateTime { + return emptyValue, ValueDecoderError{ + Name: "DateTimeDecodeValue", + Types: []reflect.Type{tDateTime}, + Received: reflect.Zero(t), + } + } + + var dt int64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.DateTime: + dt, err = vr.ReadDateTime() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a DateTime", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.DateTime(dt)), nil +} + +// DateTimeDecodeValue is the ValueDecoderFunc for DateTime. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDateTime { + return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} + } + + elem, err := dvd.dateTimeDecodeType(dc, vr, tDateTime) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tNull { + return emptyValue, ValueDecoderError{ + Name: "NullDecodeValue", + Types: []reflect.Type{tNull}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Undefined: + err = vr.ReadUndefined() + case bsontype.Null: + err = vr.ReadNull() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Null", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Null{}), nil +} + +// NullDecodeValue is the ValueDecoderFunc for Null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
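primitive.DateTime, handled by dateTimeDecodeType above, is a count of milliseconds since the Unix epoch; conversion to and from time.Time is explicit (a sketch):

package main

import (
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	t := time.Date(2024, 1, 2, 3, 4, 5, 0, time.UTC)
	dt := primitive.NewDateTimeFromTime(t)
	fmt.Println(int64(dt))       // milliseconds since the Unix epoch
	fmt.Println(dt.Time().UTC()) // back to time.Time, millisecond precision
}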
+func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tNull { + return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} + } + + elem, err := dvd.nullDecodeType(dc, vr, tNull) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tRegex { + return emptyValue, ValueDecoderError{ + Name: "RegexDecodeValue", + Types: []reflect.Type{tRegex}, + Received: reflect.Zero(t), + } + } + + var pattern, options string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Regex: + pattern, options, err = vr.ReadRegex() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Regex", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}), nil +} + +// RegexDecodeValue is the ValueDecoderFunc for Regex. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tRegex { + return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} + } + + elem, err := dvd.regexDecodeType(dc, vr, tRegex) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tDBPointer { + return emptyValue, ValueDecoderError{ + Name: "DBPointerDecodeValue", + Types: []reflect.Type{tDBPointer}, + Received: reflect.Zero(t), + } + } + + var ns string + var pointer primitive.ObjectID + var err error + switch vrType := vr.Type(); vrType { + case bsontype.DBPointer: + ns, pointer, err = vr.ReadDBPointer() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a DBPointer", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}), nil +} + +// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDBPointer { + return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} + } + + elem, err := dvd.dBPointerDecodeType(dc, vr, tDBPointer) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { + if reflectType != tTimestamp { + return emptyValue, ValueDecoderError{ + Name: "TimestampDecodeValue", + Types: []reflect.Type{tTimestamp}, + Received: reflect.Zero(reflectType), + } + } + + var t, incr uint32 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Timestamp: + t, incr, err = vr.ReadTimestamp() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Timestamp", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Timestamp{T: t, I: incr}), nil +} + +// TimestampDecodeValue is the ValueDecoderFunc for Timestamp. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tTimestamp { + return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} + } + + elem, err := dvd.timestampDecodeType(dc, vr, tTimestamp) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tMinKey { + return emptyValue, ValueDecoderError{ + Name: "MinKeyDecodeValue", + Types: []reflect.Type{tMinKey}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.MinKey: + err = vr.ReadMinKey() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a MinKey", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.MinKey{}), nil +} + +// MinKeyDecodeValue is the ValueDecoderFunc for MinKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
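primitive.Timestamp, handled by timestampDecodeType above, is BSON's internal replication timestamp: T is seconds since the Unix epoch and I is an ordinal distinguishing operations within the same second. A tiny sketch:

package main

import (
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	ts := primitive.Timestamp{T: uint32(time.Now().Unix()), I: 1}
	fmt.Printf("seconds=%d ordinal=%d\n", ts.T, ts.I)
}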
+func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tMinKey { + return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} + } + + elem, err := dvd.minKeyDecodeType(dc, vr, tMinKey) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tMaxKey { + return emptyValue, ValueDecoderError{ + Name: "MaxKeyDecodeValue", + Types: []reflect.Type{tMaxKey}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.MaxKey: + err = vr.ReadMaxKey() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a MaxKey", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.MaxKey{}), nil +} + +// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tMaxKey { + return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} + } + + elem, err := dvd.maxKeyDecodeType(dc, vr, tMaxKey) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tDecimal { + return emptyValue, ValueDecoderError{ + Name: "Decimal128DecodeValue", + Types: []reflect.Type{tDecimal}, + Received: reflect.Zero(t), + } + } + + var d128 primitive.Decimal128 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Decimal128: + d128, err = vr.ReadDecimal128() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(d128), nil +} + +// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
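Decimal128 values for the decoder above are typically constructed with ParseDecimal128 (a sketch):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	d, err := primitive.ParseDecimal128("1.5")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.String()) // 1.5
}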
+func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDecimal { + return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} + } + + elem, err := dvd.decimal128DecodeType(dctx, vr, tDecimal) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tJSONNumber { + return emptyValue, ValueDecoderError{ + Name: "JSONNumberDecodeValue", + Types: []reflect.Type{tJSONNumber}, + Received: reflect.Zero(t), + } + } + + var jsonNum json.Number + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return emptyValue, err + } + jsonNum = json.Number(strconv.FormatFloat(f64, 'f', -1, 64)) + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + jsonNum = json.Number(strconv.FormatInt(int64(i32), 10)) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return emptyValue, err + } + jsonNum = json.Number(strconv.FormatInt(i64, 10)) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a json.Number", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(jsonNum), nil +} + +// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tJSONNumber { + return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} + } + + elem, err := dvd.jsonNumberDecodeType(dc, vr, tJSONNumber) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tURL { + return emptyValue, ValueDecoderError{ + Name: "URLDecodeValue", + Types: []reflect.Type{tURL}, + Received: reflect.Zero(t), + } + } + + urlPtr := &url.URL{} + var err error + switch vrType := vr.Type(); vrType { + case bsontype.String: + var str string // Declare str here to avoid shadowing err during the ReadString call. + str, err = vr.ReadString() + if err != nil { + return emptyValue, err + } + + urlPtr, err = url.Parse(str) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a *url.URL", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(urlPtr).Elem(), nil +} + +// URLDecodeValue is the ValueDecoderFunc for url.URL. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
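Because urlDecodeType above runs the BSON string through url.Parse, a plain string field can be unmarshalled straight into a url.URL (a sketch):

package main

import (
	"fmt"
	"net/url"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	raw, _ := bson.Marshal(bson.D{{Key: "u", Value: "https://example.com/path?q=1"}})

	var out struct {
		U url.URL `bson:"u"`
	}
	if err := bson.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.U.Host, out.U.Query().Get("q")) // example.com 1
}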
+func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tURL { + return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} + } + + elem, err := dvd.urlDecodeType(dc, vr, tURL) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// TimeDecodeValue is the ValueDecoderFunc for time.Time. +// +// Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if vr.Type() != bsontype.DateTime { + return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) + } + + dt, err := vr.ReadDateTime() + if err != nil { + return err + } + + if !val.CanSet() || val.Type() != tTime { + return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} + } + + val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000).UTC())) + return nil +} + +// ByteSliceDecodeValue is the ValueDecoderFunc for []byte. +// +// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { + return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) + } + + if !val.CanSet() || val.Type() != tByteSlice { + return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + + if vr.Type() == bsontype.Null { + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + } + + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != 0x00 { + return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype) + } + + val.Set(reflect.ValueOf(data)) + return nil +} + +// MapDecodeValue is the ValueDecoderFunc for map[string]* types. +// +// Deprecated: MapDecodeValue is not registered by default. Use MapCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { + return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} + } + + switch vr.Type() { + case bsontype.Type(0), bsontype.EmbeddedDocument: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type()) + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeMap(val.Type())) + } + + eType := val.Type().Elem() + decoder, err := dc.LookupDecoder(eType) + if err != nil { + return err + } + + if eType == tEmpty { + dc.Ancestor = val.Type() + } + + keyType := val.Type().Key() + for { + key, vr, err := dr.ReadElement() + if errors.Is(err, bsonrw.ErrEOD) { + break + } + if err != nil { + return err + } + + elem := reflect.New(eType).Elem() + + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + + val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem) + } + return nil +} + +// ArrayDecodeValue is the ValueDecoderFunc for array types. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Array { + return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Array: + case bsontype.Type(0), bsontype.EmbeddedDocument: + if val.Type().Elem() != tE { + return fmt.Errorf("cannot decode document into %s", val.Type()) + } + case bsontype.Binary: + if val.Type().Elem() != tByte { + return fmt.Errorf("ArrayDecodeValue can only be used to decode binary into a byte array, got %v", vrType) + } + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return fmt.Errorf("ArrayDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) + } + + if len(data) > val.Len() { + return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type()) + } + + for idx, elem := range data { + val.Index(idx).Set(reflect.ValueOf(elem)) + } + return nil + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Undefined: + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() + default: + return fmt.Errorf("cannot decode %v into an array", vrType) + } + + var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) + switch val.Type().Elem() { + case tE: + elemsFunc = dvd.decodeD + default: + elemsFunc = dvd.decodeDefault + } + + elems, err := elemsFunc(dc, vr, val) + if err != nil { + return err + } + + if len(elems) > val.Len() { + return fmt.Errorf("more elements returned in array than can fit inside %s, got %v elements", val.Type(), len(elems)) + } + + for idx, elem := range elems { + val.Index(idx).Set(elem) + } + + return nil +} + +// SliceDecodeValue is the ValueDecoderFunc for slice types. +// +// Deprecated: SliceDecodeValue is not registered by default. Use SliceCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Slice { + return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + switch vr.Type() { + case bsontype.Array: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Type(0), bsontype.EmbeddedDocument: + if val.Type().Elem() != tE { + return fmt.Errorf("cannot decode document into %s", val.Type()) + } + default: + return fmt.Errorf("cannot decode %v into a slice", vr.Type()) + } + + var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) + switch val.Type().Elem() { + case tE: + dc.Ancestor = val.Type() + elemsFunc = dvd.decodeD + default: + elemsFunc = dvd.decodeDefault + } + + elems, err := elemsFunc(dc, vr, val) + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) + } + + val.SetLen(0) + val.Set(reflect.Append(val, elems...)) + + return nil +} + +// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + + // If BSON value is null and the go value is a pointer, then don't call + // UnmarshalBSONValue. Even if the Go pointer is already initialized (i.e., + // non-nil), encountering null in BSON will result in the pointer being + // directly set to nil here. Since the pointer is being replaced with nil, + // there is no opportunity (or reason) for the custom UnmarshalBSONValue logic + // to be called. + if vr.Type() == bsontype.Null && val.Kind() == reflect.Ptr { + val.Set(reflect.Zero(val.Type())) + + return vr.ReadNull() + } + + if val.Kind() == reflect.Ptr && val.IsNil() { + if !val.CanSet() { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + val.Set(reflect.New(val.Type().Elem())) + } + + if !val.Type().Implements(tValueUnmarshaler) { + if !val.CanAddr() { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. + } + + t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) + if err != nil { + return err + } + + m, ok := val.Interface().(ValueUnmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + return m.UnmarshalBSONValue(t, src) +} + +// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + + if val.Kind() == reflect.Ptr && val.IsNil() { + if !val.CanSet() { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + val.Set(reflect.New(val.Type().Elem())) + } + + _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) + if err != nil { + return err + } + + // If the target Go value is a pointer and the BSON field value is empty, set the value to the + // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to + // change the pointer value from within the function (only the value at the pointer address), + // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON + // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches + // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and + // the JSON field value is "null". 
+ if val.Kind() == reflect.Ptr && len(src) == 0 { + val.Set(reflect.Zero(val.Type())) + return nil + } + + if !val.Type().Implements(tUnmarshaler) { + if !val.CanAddr() { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. + } + + m, ok := val.Interface().(Unmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + return m.UnmarshalBSON(src) +} + +// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. +// +// Deprecated: EmptyInterfaceDecodeValue is not registered by default. Use EmptyInterfaceCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tEmpty { + return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + rtype, err := dc.LookupTypeMapEntry(vr.Type()) + if err != nil { + switch vr.Type() { + case bsontype.EmbeddedDocument: + if dc.Ancestor != nil { + rtype = dc.Ancestor + break + } + rtype = tD + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return err + } + } + + decoder, err := dc.LookupDecoder(rtype) + if err != nil { + return err + } + + elem := reflect.New(rtype).Elem() + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
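The two hook decoders above route decoding through user-defined UnmarshalBSONValue and UnmarshalBSON methods. A standalone sketch of the ValueUnmarshaler side; the loudString type is invented for illustration:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

// loudString records the BSON type it was decoded from.
type loudString struct {
	BSONType bsontype.Type
	Value    string
}

// UnmarshalBSONValue makes *loudString a ValueUnmarshaler, so the hook
// decoder above is selected for it.
func (l *loudString) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
	l.BSONType = t
	rv := bson.RawValue{Type: t, Value: data}
	return rv.Unmarshal(&l.Value)
}

func main() {
	raw, _ := bson.Marshal(bson.D{{Key: "s", Value: "hello"}})

	var out struct {
		S loudString `bson:"s"`
	}
	if err := bson.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.S.BSONType, out.S.Value) // string hello
}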
+func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCoreDocument { + return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, 0)) + } + + val.SetLen(0) + + cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr) + val.Set(reflect.ValueOf(cdoc)) + return err +} + +func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) { + elems := make([]reflect.Value, 0) + + ar, err := vr.ReadArray() + if err != nil { + return nil, err + } + + eType := val.Type().Elem() + + decoder, err := dc.LookupDecoder(eType) + if err != nil { + return nil, err + } + eTypeDecoder, _ := decoder.(typeDecoder) + + idx := 0 + for { + vr, err := ar.ReadValue() + if errors.Is(err, bsonrw.ErrEOA) { + break + } + if err != nil { + return nil, err + } + + elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) + if err != nil { + return nil, newDecodeError(strconv.Itoa(idx), err) + } + elems = append(elems, elem) + idx++ + } + + return elems, nil +} + +func (dvd DefaultValueDecoders) readCodeWithScope(dc DecodeContext, vr bsonrw.ValueReader) (primitive.CodeWithScope, error) { + var cws primitive.CodeWithScope + + code, dr, err := vr.ReadCodeWithScope() + if err != nil { + return cws, err + } + + scope := reflect.New(tD).Elem() + elems, err := dvd.decodeElemsFromDocumentReader(dc, dr) + if err != nil { + return cws, err + } + + scope.Set(reflect.MakeSlice(tD, 0, len(elems))) + scope.Set(reflect.Append(scope, elems...)) + + cws = primitive.CodeWithScope{ + Code: primitive.JavaScript(code), + Scope: scope.Interface().(primitive.D), + } + return cws, nil +} + +func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tCodeWithScope { + return emptyValue, ValueDecoderError{ + Name: "CodeWithScopeDecodeValue", + Types: []reflect.Type{tCodeWithScope}, + Received: reflect.Zero(t), + } + } + + var cws primitive.CodeWithScope + var err error + switch vrType := vr.Type(); vrType { + case bsontype.CodeWithScope: + cws, err = dvd.readCodeWithScope(dc, vr) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(cws), nil +} + +// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
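readCodeWithScope above rebuilds the scope as a primitive.D, so a CodeWithScope survives a round trip (a sketch):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	in := bson.D{{Key: "f", Value: primitive.CodeWithScope{
		Code:  "function() { return x; }",
		Scope: bson.D{{Key: "x", Value: int32(1)}},
	}}}
	raw, err := bson.Marshal(in)
	if err != nil {
		panic(err)
	}

	var out struct {
		F primitive.CodeWithScope `bson:"f"`
	}
	if err := bson.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.F.Code, out.F.Scope) // function() { return x; } [{x 1}]
}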
+func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCodeWithScope { + return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} + } + + elem, err := dvd.codeWithScopeDecodeType(dc, vr, tCodeWithScope) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) { + switch vr.Type() { + case bsontype.Type(0), bsontype.EmbeddedDocument: + default: + return nil, fmt.Errorf("cannot decode %v into a D", vr.Type()) + } + + dr, err := vr.ReadDocument() + if err != nil { + return nil, err + } + + return dvd.decodeElemsFromDocumentReader(dc, dr) +} + +func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) { + decoder, err := dc.LookupDecoder(tEmpty) + if err != nil { + return nil, err + } + + elems := make([]reflect.Value, 0) + for { + key, vr, err := dr.ReadElement() + if errors.Is(err, bsonrw.ErrEOD) { + break + } + if err != nil { + return nil, err + } + + val := reflect.New(tEmpty).Elem() + err = decoder.DecodeValue(dc, vr, val) + if err != nil { + return nil, newDecodeError(key, err) + } + + elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()})) + } + + return elems, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go new file mode 100644 index 00000000000..4751ae995e7 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -0,0 +1,856 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "net/url" + "reflect" + "sync" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var defaultValueEncoders DefaultValueEncoders + +var bvwPool = bsonrw.NewBSONValueWriterPool() + +var errInvalidValue = errors.New("cannot encode invalid element") + +var sliceWriterPool = sync.Pool{ + New: func() interface{} { + sw := make(bsonrw.SliceWriter, 0) + return &sw + }, +} + +func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error { + vw, err := dw.WriteDocumentElement(e.Key) + if err != nil { + return err + } + + if e.Value == nil { + return vw.WriteNull() + } + encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value)) + if err != nil { + return err + } + + err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value)) + if err != nil { + return err + } + return nil +} + +// DefaultValueEncoders is a namespace type for the default ValueEncoders used +// when creating a registry. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +type DefaultValueEncoders struct{} + +// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with +// the provided RegistryBuilder. 
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) {
+	if rb == nil {
+		panic(errors.New("argument to RegisterDefaultEncoders must not be nil"))
+	}
+	rb.
+		RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec).
+		RegisterTypeEncoder(tTime, defaultTimeCodec).
+		RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec).
+		RegisterTypeEncoder(tCoreArray, defaultArrayCodec).
+		RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)).
+		RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)).
+		RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)).
+		RegisterTypeEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)).
+		RegisterTypeEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)).
+		RegisterTypeEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)).
+		RegisterTypeEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)).
+		RegisterTypeEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)).
+		RegisterTypeEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)).
+		RegisterTypeEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)).
+		RegisterTypeEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)).
+		RegisterTypeEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)).
+		RegisterTypeEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)).
+		RegisterTypeEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)).
+		RegisterTypeEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)).
+		RegisterTypeEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)).
+		RegisterTypeEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)).
+		RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Uint, defaultUIntCodec).
+		RegisterDefaultEncoder(reflect.Uint8, defaultUIntCodec).
+		RegisterDefaultEncoder(reflect.Uint16, defaultUIntCodec).
+		RegisterDefaultEncoder(reflect.Uint32, defaultUIntCodec).
+		RegisterDefaultEncoder(reflect.Uint64, defaultUIntCodec).
+		RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)).
+		RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)).
+		RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)).
+		RegisterDefaultEncoder(reflect.Map, defaultMapCodec).
+		RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec).
+		RegisterDefaultEncoder(reflect.String, defaultStringCodec).
+		RegisterDefaultEncoder(reflect.Struct, newDefaultStructCodec()).
+		RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()).
+		RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)).
+		RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)).
+		RegisterHookEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue))
+}
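As a usage sketch (not part of the vendored file), the deprecated builder API above composes like this; `Build` is assumed to be the builder's finalizer defined elsewhere in this vendored package, and error handling is elided:

```go
package main

import "go.mongodb.org/mongo-driver/bson/bsoncodec"

func main() {
	// Wire the default encoders and decoders onto a RegistryBuilder, then
	// freeze it into a Registry (the deprecated v1 path).
	rb := bsoncodec.NewRegistryBuilder()
	bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
	bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
	reg := rb.Build() // assumed: Build is defined later in this vendored registry.go
	_ = reg
}
```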
+
+// BooleanEncodeValue is the ValueEncoderFunc for bool types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Bool {
+		return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
+	}
+	return vw.WriteBoolean(val.Bool())
+}
+
+func fitsIn32Bits(i int64) bool {
+	return math.MinInt32 <= i && i <= math.MaxInt32
+}
+
+// IntEncodeValue is the ValueEncoderFunc for int types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	switch val.Kind() {
+	case reflect.Int8, reflect.Int16, reflect.Int32:
+		return vw.WriteInt32(int32(val.Int()))
+	case reflect.Int:
+		i64 := val.Int()
+		if fitsIn32Bits(i64) {
+			return vw.WriteInt32(int32(i64))
+		}
+		return vw.WriteInt64(i64)
+	case reflect.Int64:
+		i64 := val.Int()
+		if ec.MinSize && fitsIn32Bits(i64) {
+			return vw.WriteInt32(int32(i64))
+		}
+		return vw.WriteInt64(i64)
+	}
+
+	return ValueEncoderError{
+		Name:     "IntEncodeValue",
+		Kinds:    []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int},
+		Received: val,
+	}
+}
+
+// UintEncodeValue is the ValueEncoderFunc for uint types.
+//
+// Deprecated: UintEncodeValue is not registered by default. Use UintCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	switch val.Kind() {
+	case reflect.Uint8, reflect.Uint16:
+		return vw.WriteInt32(int32(val.Uint()))
+	case reflect.Uint, reflect.Uint32, reflect.Uint64:
+		u64 := val.Uint()
+		if ec.MinSize && u64 <= math.MaxInt32 {
+			return vw.WriteInt32(int32(u64))
+		}
+		if u64 > math.MaxInt64 {
+			return fmt.Errorf("%d overflows int64", u64)
+		}
+		return vw.WriteInt64(int64(u64))
+	}
+
+	return ValueEncoderError{
+		Name:     "UintEncodeValue",
+		Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+		Received: val,
+	}
+}
+
+// FloatEncodeValue is the ValueEncoderFunc for float types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	switch val.Kind() {
+	case reflect.Float32, reflect.Float64:
+		return vw.WriteDouble(val.Float())
+	}
+
+	return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val}
+}
+
+// StringEncodeValue is the ValueEncoderFunc for string types.
+//
+// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.Kind() != reflect.String {
+		return ValueEncoderError{
+			Name:     "StringEncodeValue",
+			Kinds:    []reflect.Kind{reflect.String},
+			Received: val,
+		}
+	}
+
+	return vw.WriteString(val.String())
+}
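To make the sizing rule in IntEncodeValue concrete, here is a small stand-alone sketch; `bsonIntWidth` is a hypothetical helper that mirrors the int64 branch above, not a driver API:

```go
package main

import (
	"fmt"
	"math"
)

// bsonIntWidth is a hypothetical helper, not part of the driver: it mirrors
// the int64 branch of IntEncodeValue, where MinSize demotes values that fit
// in 32 bits to BSON int32.
func bsonIntWidth(i int64, minSize bool) string {
	if minSize && math.MinInt32 <= i && i <= math.MaxInt32 {
		return "int32"
	}
	return "int64"
}

func main() {
	fmt.Println(bsonIntWidth(42, true))            // int32
	fmt.Println(bsonIntWidth(42, false))           // int64
	fmt.Println(bsonIntWidth(math.MaxInt64, true)) // int64
}
```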
+
+// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tOID {
+		return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val}
+	}
+	return vw.WriteObjectID(val.Interface().(primitive.ObjectID))
+}
+
+// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDecimal {
+		return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val}
+	}
+	return vw.WriteDecimal128(val.Interface().(primitive.Decimal128))
+}
+
+// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tJSONNumber {
+		return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
+	}
+	jsnum := val.Interface().(json.Number)
+
+	// Attempt int first, then float64
+	if i64, err := jsnum.Int64(); err == nil {
+		return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64))
+	}
+
+	f64, err := jsnum.Float64()
+	if err != nil {
+		return err
+	}
+
+	return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64))
+}
+
+// URLEncodeValue is the ValueEncoderFunc for url.URL.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tURL {
+		return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val}
+	}
+	u := val.Interface().(url.URL)
+	return vw.WriteString(u.String())
+}
+
+// TimeEncodeValue is the ValueEncoderFunc for time.Time.
+//
+// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tTime {
+		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+	tt := val.Interface().(time.Time)
+	dt := primitive.NewDateTimeFromTime(tt)
+	return vw.WriteDateTime(int64(dt))
+}
+
+// ByteSliceEncodeValue is the ValueEncoderFunc for []byte.
+//
+// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tByteSlice {
+		return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+	}
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	return vw.WriteBinary(val.Interface().([]byte))
+}
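JSONNumberEncodeValue's int-then-float ordering can be seen with nothing but the standard library; this sketch mirrors that ordering rather than calling the encoder directly:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Same ordering as JSONNumberEncodeValue: try the integer reading first,
	// and fall back to float64 (a BSON double) only if that fails.
	for _, n := range []json.Number{"42", "4.2"} {
		if i, err := n.Int64(); err == nil {
			fmt.Printf("%s encodes via the int path (%d)\n", n, i)
			continue
		}
		f, _ := n.Float64()
		fmt.Printf("%s encodes as a double (%g)\n", n, f)
	}
}
```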
+
+// MapEncodeValue is the ValueEncoderFunc for map[string]* types.
+//
+// Deprecated: MapEncodeValue is not registered by default. Use MapCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
+		return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	if val.IsNil() {
+		// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
+		// to a TopLevel document. We can't currently tell if this is what actually happened, but if
+		// there's a deeper underlying problem, the error will also be returned from WriteDocument,
+		// so just continue. The operations on a map reflection value are valid, so we can call
+		// MapKeys within mapEncodeValue without a problem.
+		err := vw.WriteNull()
+		if err == nil {
+			return nil
+		}
+	}
+
+	dw, err := vw.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return dve.mapEncodeValue(ec, dw, val, nil)
+}
+
+// mapEncodeValue handles encoding of the values of a map. The collisionFn returns
+// true if the provided key exists; it is mainly used for inline maps in the
+// struct codec.
+func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error {
+
+	elemType := val.Type().Elem()
+	encoder, err := ec.LookupEncoder(elemType)
+	if err != nil && elemType.Kind() != reflect.Interface {
+		return err
+	}
+
+	keys := val.MapKeys()
+	for _, key := range keys {
+		if collisionFn != nil && collisionFn(key.String()) {
+			return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key)
+		}
+
+		currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key))
+		if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+			return lookupErr
+		}
+
+		vw, err := dw.WriteDocumentElement(key.String())
+		if err != nil {
+			return err
+		}
+
+		if errors.Is(lookupErr, errInvalidValue) {
+			err = vw.WriteNull()
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		err = currEncoder.EncodeValue(ec, vw, currVal)
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// ArrayEncodeValue is the ValueEncoderFunc for array types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Array {
+		return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
+	}
+
+	// If we have a []primitive.E we want to treat it as a document instead of as an array.
+	if val.Type().Elem() == tE {
+		dw, err := vw.WriteDocument()
+		if err != nil {
+			return err
+		}
+
+		for idx := 0; idx < val.Len(); idx++ {
+			e := val.Index(idx).Interface().(primitive.E)
+			err = encodeElement(ec, dw, e)
+			if err != nil {
+				return err
+			}
+		}
+
+		return dw.WriteDocumentEnd()
+	}
+
+	// If we have a []byte we want to treat it as a binary instead of as an array.
+	if val.Type().Elem() == tByte {
+		var byteSlice []byte
+		for idx := 0; idx < val.Len(); idx++ {
+			byteSlice = append(byteSlice, val.Index(idx).Interface().(byte))
+		}
+		return vw.WriteBinary(byteSlice)
+	}
+
+	aw, err := vw.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	elemType := val.Type().Elem()
+	encoder, err := ec.LookupEncoder(elemType)
+	if err != nil && elemType.Kind() != reflect.Interface {
+		return err
+	}
+
+	for idx := 0; idx < val.Len(); idx++ {
+		currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx))
+		if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+			return lookupErr
+		}
+
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		if errors.Is(lookupErr, errInvalidValue) {
+			err = vw.WriteNull()
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		err = currEncoder.EncodeValue(ec, vw, currVal)
+		if err != nil {
+			return err
+		}
+	}
+	return aw.WriteArrayEnd()
+}
+
+// SliceEncodeValue is the ValueEncoderFunc for slice types.
+//
+// Deprecated: SliceEncodeValue is not registered by default. Use SliceCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Slice {
+		return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	// If we have a []primitive.E we want to treat it as a document instead of as an array.
+	if val.Type().ConvertibleTo(tD) {
+		d := val.Convert(tD).Interface().(primitive.D)
+
+		dw, err := vw.WriteDocument()
+		if err != nil {
+			return err
+		}
+
+		for _, e := range d {
+			err = encodeElement(ec, dw, e)
+			if err != nil {
+				return err
+			}
+		}
+
+		return dw.WriteDocumentEnd()
+	}
+
+	aw, err := vw.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	elemType := val.Type().Elem()
+	encoder, err := ec.LookupEncoder(elemType)
+	if err != nil && elemType.Kind() != reflect.Interface {
+		return err
+	}
+
+	for idx := 0; idx < val.Len(); idx++ {
+		currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx))
+		if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+			return lookupErr
+		}
+
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		if errors.Is(lookupErr, errInvalidValue) {
+			err = vw.WriteNull()
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		err = currEncoder.EncodeValue(ec, vw, currVal)
+		if err != nil {
+			return err
+		}
+	}
+	return aw.WriteArrayEnd()
+}
+
+func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncoder ValueEncoder, currVal reflect.Value) (ValueEncoder, reflect.Value, error) {
+	if origEncoder != nil || (currVal.Kind() != reflect.Interface) {
+		return origEncoder, currVal, nil
+	}
+	currVal = currVal.Elem()
+	if !currVal.IsValid() {
+		return nil, currVal, errInvalidValue
+	}
+	currEncoder, err := ec.LookupEncoder(currVal.Type())
+
+	return currEncoder, currVal, err
+}
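The convertible-to-primitive.D rule in SliceEncodeValue (the registered SliceCodec appears to apply the same check) means a named slice type can opt into document encoding. A minimal sketch, with `ordered` and `payload` as illustrative names:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

// ordered is an illustrative named type; because it is convertible to
// primitive.D, the slice encoder emits it as an embedded document rather
// than as a BSON array.
type ordered primitive.D

type payload struct {
	Meta ordered `bson:"meta"`
}

func main() {
	raw, err := bson.Marshal(payload{Meta: ordered{{Key: "a", Value: int32(1)}}})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(raw)) // the "meta" field is a document, not an array
}
```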
+
+// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}.
+//
+// Deprecated: EmptyInterfaceEncodeValue is not registered by default. Use EmptyInterfaceCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tEmpty {
+		return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	encoder, err := ec.LookupEncoder(val.Elem().Type())
+	if err != nil {
+		return err
+	}
+
+	return encoder.EncodeValue(ec, vw, val.Elem())
+}
+
+// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	// Either val or a pointer to val must implement ValueMarshaler
+	switch {
+	case !val.IsValid():
+		return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val}
+	case val.Type().Implements(tValueMarshaler):
+		// If ValueMarshaler is implemented on a concrete type, make sure that val isn't a nil pointer
+		if isImplementationNil(val, tValueMarshaler) {
+			return vw.WriteNull()
+		}
+	case reflect.PtrTo(val.Type()).Implements(tValueMarshaler) && val.CanAddr():
+		val = val.Addr()
+	default:
+		return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val}
+	}
+
+	m, ok := val.Interface().(ValueMarshaler)
+	if !ok {
+		return vw.WriteNull()
+	}
+	t, data, err := m.MarshalBSONValue()
+	if err != nil {
+		return err
+	}
+	return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data)
+}
+
+// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	// Either val or a pointer to val must implement Marshaler
+	switch {
+	case !val.IsValid():
+		return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val}
+	case val.Type().Implements(tMarshaler):
+		// If Marshaler is implemented on a concrete type, make sure that val isn't a nil pointer
+		if isImplementationNil(val, tMarshaler) {
+			return vw.WriteNull()
+		}
+	case reflect.PtrTo(val.Type()).Implements(tMarshaler) && val.CanAddr():
+		val = val.Addr()
+	default:
+		return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val}
+	}
+
+	m, ok := val.Interface().(Marshaler)
+	if !ok {
+		return vw.WriteNull()
+	}
+	data, err := m.MarshalBSON()
+	if err != nil {
+		return err
+	}
+	return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data)
+}
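A type only needs a MarshalBSONValue method for ValueMarshalerEncodeValue to pick it up through the hook registration shown earlier. A sketch, where `Celsius` is illustrative and bson.MarshalValue comes from the driver's bson package:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

// Celsius is an illustrative type. Its MarshalBSONValue method satisfies the
// ValueMarshaler hook, so the encoder above writes it as a BSON string.
type Celsius float64

func (c Celsius) MarshalBSONValue() (bsontype.Type, []byte, error) {
	return bson.MarshalValue(fmt.Sprintf("%.1fC", float64(c)))
}

func main() {
	raw, err := bson.Marshal(bson.D{{Key: "temp", Value: Celsius(21.5)}})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(raw)) // {"temp": "21.5C"}
}
```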
+
+// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	// Either val or a pointer to val must implement Proxy
+	switch {
+	case !val.IsValid():
+		return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val}
+	case val.Type().Implements(tProxy):
+		// If Proxy is implemented on a concrete type, make sure that val isn't a nil pointer
+		if isImplementationNil(val, tProxy) {
+			return vw.WriteNull()
+		}
+	case reflect.PtrTo(val.Type()).Implements(tProxy) && val.CanAddr():
+		val = val.Addr()
+	default:
+		return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val}
+	}
+
+	m, ok := val.Interface().(Proxy)
+	if !ok {
+		return vw.WriteNull()
+	}
+	v, err := m.ProxyBSON()
+	if err != nil {
+		return err
+	}
+	if v == nil {
+		encoder, err := ec.LookupEncoder(nil)
+		if err != nil {
+			return err
+		}
+		return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil))
+	}
+	vv := reflect.ValueOf(v)
+	switch vv.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		vv = vv.Elem()
+	}
+	encoder, err := ec.LookupEncoder(vv.Type())
+	if err != nil {
+		return err
+	}
+	return encoder.EncodeValue(ec, vw, vv)
+}
+
+// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tJavaScript {
+		return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val}
+	}
+
+	return vw.WriteJavascript(val.String())
+}
+
+// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tSymbol {
+		return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val}
+	}
+
+	return vw.WriteSymbol(val.String())
+}
+
+// BinaryEncodeValue is the ValueEncoderFunc for Binary.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tBinary {
+		return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val}
+	}
+	b := val.Interface().(primitive.Binary)
+
+	return vw.WriteBinaryWithSubtype(b.Data, b.Subtype)
+}
+
+// UndefinedEncodeValue is the ValueEncoderFunc for Undefined.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tUndefined {
+		return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val}
+	}
+
+	return vw.WriteUndefined()
+}
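The Proxy hook above lets a type substitute a different value at encode time. A sketch with an illustrative `apiKey` type; the default registry built by bson.NewRegistry registers this hook:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// apiKey is an illustrative type. ProxyBSON satisfies the Proxy hook, so
// ProxyEncodeValue encodes the returned placeholder instead of the struct.
type apiKey struct {
	secret string
}

func (k apiKey) ProxyBSON() (interface{}, error) {
	return "[redacted]", nil
}

func main() {
	raw, err := bson.Marshal(bson.D{{Key: "key", Value: apiKey{secret: "hunter2"}}})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(raw)) // {"key": "[redacted]"}
}
```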
+
+// DateTimeEncodeValue is the ValueEncoderFunc for DateTime.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDateTime {
+		return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val}
+	}
+
+	return vw.WriteDateTime(val.Int())
+}
+
+// NullEncodeValue is the ValueEncoderFunc for Null.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tNull {
+		return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val}
+	}
+
+	return vw.WriteNull()
+}
+
+// RegexEncodeValue is the ValueEncoderFunc for Regex.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tRegex {
+		return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val}
+	}
+
+	regex := val.Interface().(primitive.Regex)
+
+	return vw.WriteRegex(regex.Pattern, regex.Options)
+}
+
+// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDBPointer {
+		return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
+	}
+
+	dbp := val.Interface().(primitive.DBPointer)
+
+	return vw.WriteDBPointer(dbp.DB, dbp.Pointer)
+}
+
+// TimestampEncodeValue is the ValueEncoderFunc for Timestamp.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tTimestamp {
+		return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
+	}
+
+	ts := val.Interface().(primitive.Timestamp)
+
+	return vw.WriteTimestamp(ts.T, ts.I)
+}
+
+// MinKeyEncodeValue is the ValueEncoderFunc for MinKey.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tMinKey {
+		return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val}
+	}
+
+	return vw.WriteMinKey()
+}
+
+// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tMaxKey {
+		return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
+	}
+
+	return vw.WriteMaxKey()
+}
+
+// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tCoreDocument {
+		return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val}
+	}
+
+	cdoc := val.Interface().(bsoncore.Document)
+
+	return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc)
+}
+
+// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tCodeWithScope {
+		return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}
+	}
+
+	cws := val.Interface().(primitive.CodeWithScope)
+
+	dw, err := vw.WriteCodeWithScope(string(cws.Code))
+	if err != nil {
+		return err
+	}
+
+	sw := sliceWriterPool.Get().(*bsonrw.SliceWriter)
+	defer sliceWriterPool.Put(sw)
+	*sw = (*sw)[:0]
+
+	scopeVW := bvwPool.Get(sw)
+	defer bvwPool.Put(scopeVW)
+
+	encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope))
+	if err != nil {
+		return err
+	}
+
+	err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope))
+	if err != nil {
+		return err
+	}
+
+	err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw)
+	if err != nil {
+		return err
+	}
+	return dw.WriteDocumentEnd()
+}
+
+// isImplementationNil reports whether val is a nil pointer and inter is implemented on a concrete type.
+func isImplementationNil(val reflect.Value, inter reflect.Type) bool {
+	vt := val.Type()
+	for vt.Kind() == reflect.Ptr {
+		vt = vt.Elem()
+	}
+	return vt.Implements(inter) && val.Kind() == reflect.Ptr && val.IsNil()
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go
new file mode 100644
index 00000000000..4613e5a1ec7
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go
@@ -0,0 +1,95 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsoncodec provides a system for encoding values to BSON representations and decoding
+// values from BSON representations. This package considers both binary BSON and ExtendedJSON as
+// BSON representations. The types in this package enable a flexible system for handling this
+// encoding and decoding.
+//
+// The codec system is composed of two parts:
+//
+// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON
+// representations.
+//
+// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for
+// retrieving them.
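As a minimal sketch of the two parts just enumerated, here is a ValueEncoderFunc (part 1) registered on a Registry (part 2); `unixSeconds` is an illustrative type, and NewRegistry and RegisterTypeEncoder are the Registry APIs added later in this diff:

```go
package main

import (
	"reflect"

	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

// unixSeconds is an illustrative type encoded as a BSON datetime.
type unixSeconds int64

func main() {
	// Part 1: a ValueEncoder, via the ValueEncoderFunc adapter.
	enc := bsoncodec.ValueEncoderFunc(func(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
		return vw.WriteDateTime(val.Int() * 1000) // seconds to milliseconds
	})

	// Part 2: a Registry that stores the encoder and hands it back at
	// marshal time for this exact type.
	reg := bsoncodec.NewRegistry()
	reg.RegisterTypeEncoder(reflect.TypeOf(unixSeconds(0)), enc)
	_ = reg
}
```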
+//
+// # ValueEncoders and ValueDecoders
+//
+// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON.
+// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the
+// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc
+// is provided to allow use of a function with the correct signature as a ValueEncoder. An
+// EncodeContext instance is provided to allow implementations to lookup further ValueEncoders and
+// to provide configuration information.
+//
+// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that
+// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to
+// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext
+// instance is provided and serves similar functionality to the EncodeContext.
+//
+// # Registry
+//
+// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. See the Registry type
+// documentation for examples of registering various custom encoders and decoders. A Registry can
+// have four main types of registrations:
+//
+// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and
+// RegisterTypeDecoder methods. The registered codec will be invoked when encoding/decoding a value
+// whose type matches the registered type exactly.
+// If the registered type is an interface, the codec will be invoked when encoding or decoding
+// values whose type is the interface, but not for values with concrete types that implement the
+// interface.
+//
+// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and
+// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs
+// will be invoked when encoding or decoding values whose types implement the interface. An example
+// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method
+// for any value whose type implements bson.Marshaler, regardless of the value's concrete type.
+//
+// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type
+// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}.
+// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances,
+// respectively, when decoding into a bson.D. The following code would change the behavior so these
+// values decode as Go int instances instead:
+//
+//	intType := reflect.TypeOf(int(0))
+//	registry.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType)
+//
+// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and
+// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding
+// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't
+// match a registered type or hook encoder/decoder first. These methods should be used to change the
+// behavior for all values for a specific kind.
+//
+// # Registry Lookup Procedure
+//
+// When looking up an encoder in a Registry, the precedence rules are as follows:
+//
+// 1. A type encoder registered for the exact type of the value.
+//
+// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to
+// the value.
+// If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and
+// bsoncodec.ValueMarshaler), the first one registered will be selected. Note that registries
+// constructed using bson.NewRegistry have driver-defined hooks registered for the
+// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take
+// precedence over any new hooks.
+//
+// 3. A kind encoder registered for the value's kind.
+//
+// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The
+// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder
+// will be returned if no decoder is found.
+//
+// # DefaultValueEncoders and DefaultValueDecoders
+//
+// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and
+// ValueDecoders for handling a wide range of Go types, including all of the types within the
+// primitive package. To make registering these codecs easier, a helper method on each type is
+// provided. For the DefaultValueEncoders type the method is called RegisterDefaultEncoders and for
+// the DefaultValueDecoders type the method is called RegisterDefaultDecoders; this method also
+// handles registering type map entries for each BSON type.
+package bsoncodec
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
new file mode 100644
index 00000000000..098368f0711
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
@@ -0,0 +1,173 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// EmptyInterfaceCodec is the Codec used for interface{} values.
+//
+// Deprecated: EmptyInterfaceCodec will not be directly configurable in Go
+// Driver 2.0. To configure the empty interface encode and decode behavior, use
+// the configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the empty interface
+// encode and decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to unmarshal BSON binary field
+// values as a Go byte slice, use:
+//
+//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
+//		BinaryAsSlice: true,
+//	})
+//
+// See the deprecation notice for each field in EmptyInterfaceCodec for the
+// corresponding settings.
+type EmptyInterfaceCodec struct {
+	// DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the
+	// "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
+	//
+	// Deprecated: Use bson.Decoder.BinaryAsSlice or options.BSONOptions.BinaryAsSlice instead.
+	DecodeBinaryAsSlice bool
+}
+
+var (
+	defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec()
+
+	// Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it
+	// to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a
+	// collection.
+	_ typeDecoder = defaultEmptyInterfaceCodec
+)
+
+// NewEmptyInterfaceCodec returns an EmptyInterfaceCodec with options opts.
+//
+// Deprecated: NewEmptyInterfaceCodec will not be available in Go Driver 2.0. See
+// [EmptyInterfaceCodec] for more details.
+func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec {
+	interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...)
+
+	codec := EmptyInterfaceCodec{}
+	if interfaceOpt.DecodeBinaryAsSlice != nil {
+		codec.DecodeBinaryAsSlice = *interfaceOpt.DecodeBinaryAsSlice
+	}
+	return &codec
+}
+
+// EncodeValue is the ValueEncoderFunc for interface{}.
+func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tEmpty {
+		return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	encoder, err := ec.LookupEncoder(val.Elem().Type())
+	if err != nil {
+		return err
+	}
+
+	return encoder.EncodeValue(ec, vw, val.Elem())
+}
+
+func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) {
+	isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument
+	if isDocument {
+		if dc.defaultDocumentType != nil {
+			// If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return
+			// that type.
+			return dc.defaultDocumentType, nil
+		}
+		if dc.Ancestor != nil {
+			// Using ancestor information rather than looking up the type map entry forces consistent decoding.
+			// If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry
+			// has been registered.
+			return dc.Ancestor, nil
+		}
+	}
+
+	rtype, err := dc.LookupTypeMapEntry(valueType)
+	if err == nil {
+		return rtype, nil
+	}
+
+	if isDocument {
+		// For documents, fallback to looking up a type map entry for bsontype.Type(0) or bsontype.EmbeddedDocument,
+		// depending on the original valueType.
+ var lookupType bsontype.Type + switch valueType { + case bsontype.Type(0): + lookupType = bsontype.EmbeddedDocument + case bsontype.EmbeddedDocument: + lookupType = bsontype.Type(0) + } + + rtype, err = dc.LookupTypeMapEntry(lookupType) + if err == nil { + return rtype, nil + } + } + + return nil, err +} + +func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tEmpty { + return emptyValue, ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.Zero(t)} + } + + rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type()) + if err != nil { + switch vr.Type() { + case bsontype.Null: + return reflect.Zero(t), vr.ReadNull() + default: + return emptyValue, err + } + } + + decoder, err := dc.LookupDecoder(rtype) + if err != nil { + return emptyValue, err + } + + elem, err := decodeTypeOrValue(decoder, dc, vr, rtype) + if err != nil { + return emptyValue, err + } + + if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary { + binElem := elem.Interface().(primitive.Binary) + if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { + elem = reflect.ValueOf(binElem.Data) + } + } + + return elem, nil +} + +// DecodeValue is the ValueDecoderFunc for interface{}. +func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tEmpty { + return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + elem, err := eic.decodeType(dc, vr, val.Type()) + if err != nil { + return err + } + + val.Set(elem) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go new file mode 100644 index 00000000000..d7e00ffa8d1 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -0,0 +1,343 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +var defaultMapCodec = NewMapCodec() + +// MapCodec is the Codec used for map values. +// +// Deprecated: MapCodec will not be directly configurable in Go Driver 2.0. To +// configure the map encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the map encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal nil Go maps as empty BSON +// documents, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilMapAsEmpty: true, +// }) +// +// See the deprecation notice for each field in MapCodec for the corresponding +// settings. +type MapCodec struct { + // DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination + // value passed to Decode before unmarshaling BSON documents into them. 
+	//
+	// Deprecated: Use bson.Decoder.ZeroMaps or options.BSONOptions.ZeroMaps instead.
+	DecodeZerosMap bool
+
+	// EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of
+	// BSON null.
+	//
+	// Deprecated: Use bson.Encoder.NilMapAsEmpty or options.BSONOptions.NilMapAsEmpty instead.
+	EncodeNilAsEmpty bool
+
+	// EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name
+	// strings using fmt.Sprintf() instead of the default string conversion logic.
+	//
+	// Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt or
+	// options.BSONOptions.StringifyMapKeysWithFmt instead.
+	EncodeKeysWithStringer bool
+}
+
+// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key.
+// This applies to types used as map keys and is similar to encoding.TextMarshaler.
+type KeyMarshaler interface {
+	MarshalKey() (key string, err error)
+}
+
+// KeyUnmarshaler is the interface implemented by an object that can unmarshal a string representation
+// of itself. This applies to types used as map keys and is similar to encoding.TextUnmarshaler.
+//
+// UnmarshalKey must be able to decode the form generated by MarshalKey.
+// UnmarshalKey must copy the text if it wishes to retain the text
+// after returning.
+type KeyUnmarshaler interface {
+	UnmarshalKey(key string) error
+}
+
+// NewMapCodec returns a MapCodec with options opts.
+//
+// Deprecated: NewMapCodec will not be available in Go Driver 2.0. See
+// [MapCodec] for more details.
+func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec {
+	mapOpt := bsonoptions.MergeMapCodecOptions(opts...)
+
+	codec := MapCodec{}
+	if mapOpt.DecodeZerosMap != nil {
+		codec.DecodeZerosMap = *mapOpt.DecodeZerosMap
+	}
+	if mapOpt.EncodeNilAsEmpty != nil {
+		codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty
+	}
+	if mapOpt.EncodeKeysWithStringer != nil {
+		codec.EncodeKeysWithStringer = *mapOpt.EncodeKeysWithStringer
+	}
+	return &codec
+}
+
+// EncodeValue is the ValueEncoder for map[*]* types.
+func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Map {
+		return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty {
+		// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
+		// to a TopLevel document. We can't currently tell if this is what actually happened, but if
+		// there's a deeper underlying problem, the error will also be returned from WriteDocument,
+		// so just continue. The operations on a map reflection value are valid, so we can call
+		// MapKeys within mapEncodeValue without a problem.
+		err := vw.WriteNull()
+		if err == nil {
+			return nil
+		}
+	}
+
+	dw, err := vw.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return mc.mapEncodeValue(ec, dw, val, nil)
+}
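A sketch of the KeyMarshaler and KeyUnmarshaler hooks defined above, with `userID` as an illustrative key type; the registered MapCodec consults MarshalKey when building document field names:

```go
package main

import (
	"fmt"
	"strconv"

	"go.mongodb.org/mongo-driver/bson"
)

// userID is an illustrative key type wired into the KeyMarshaler and
// KeyUnmarshaler hooks, so map[userID]T round-trips through string document
// field names.
type userID int

func (u userID) MarshalKey() (string, error) {
	return strconv.Itoa(int(u)), nil
}

func (u *userID) UnmarshalKey(key string) error {
	n, err := strconv.Atoi(key)
	*u = userID(n)
	return err
}

func main() {
	raw, err := bson.Marshal(map[userID]string{7: "alice"})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(raw)) // {"7": "alice"}
}
```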
+
+// mapEncodeValue handles encoding of the values of a map. The collisionFn returns
+// true if the provided key exists; it is mainly used for inline maps in the
+// struct codec.
+func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error {
+
+	elemType := val.Type().Elem()
+	encoder, err := ec.LookupEncoder(elemType)
+	if err != nil && elemType.Kind() != reflect.Interface {
+		return err
+	}
+
+	keys := val.MapKeys()
+	for _, key := range keys {
+		keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt)
+		if err != nil {
+			return err
+		}
+
+		if collisionFn != nil && collisionFn(keyStr) {
+			return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key)
+		}
+
+		currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key))
+		if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+			return lookupErr
+		}
+
+		vw, err := dw.WriteDocumentElement(keyStr)
+		if err != nil {
+			return err
+		}
+
+		if errors.Is(lookupErr, errInvalidValue) {
+			err = vw.WriteNull()
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		err = currEncoder.EncodeValue(ec, vw, currVal)
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// DecodeValue is the ValueDecoder for map[string/decimal]* types.
+func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if val.Kind() != reflect.Map || (!val.CanSet() && val.IsNil()) {
+		return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+	case bsontype.Null:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	case bsontype.Undefined:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadUndefined()
+	default:
+		return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type())
+	}
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeMap(val.Type()))
+	}
+
+	if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) {
+		clearMap(val)
+	}
+
+	eType := val.Type().Elem()
+	decoder, err := dc.LookupDecoder(eType)
+	if err != nil {
+		return err
+	}
+	eTypeDecoder, _ := decoder.(typeDecoder)
+
+	if eType == tEmpty {
+		dc.Ancestor = val.Type()
+	}
+
+	keyType := val.Type().Key()
+
+	for {
+		key, vr, err := dr.ReadElement()
+		if errors.Is(err, bsonrw.ErrEOD) {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		k, err := mc.decodeKey(key, keyType)
+		if err != nil {
+			return err
+		}
+
+		elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true)
+		if err != nil {
+			return newDecodeError(key, err)
+		}
+
+		val.SetMapIndex(k, elem)
+	}
+	return nil
+}
+
+func clearMap(m reflect.Value) {
+	var none reflect.Value
+	for _, k := range m.MapKeys() {
+		m.SetMapIndex(k, none)
+	}
+}
+
+func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) {
+	if mc.EncodeKeysWithStringer || encodeKeysWithStringer {
+		return fmt.Sprint(val), nil
+	}
+
+	// keys of any string type are used directly
+	if val.Kind() == reflect.String {
+		return val.String(), nil
+	}
+	// KeyMarshalers are marshaled
+	if km, ok := val.Interface().(KeyMarshaler); ok {
+		if val.Kind() == reflect.Ptr && val.IsNil() {
+			return "", nil
+		}
+		buf, err := km.MarshalKey()
+		if err == nil {
+			return buf, nil
+		}
+		return "", err
+	}
+	// keys implementing encoding.TextMarshaler are marshaled.
+	if km, ok := val.Interface().(encoding.TextMarshaler); ok {
+		if val.Kind() == reflect.Ptr && val.IsNil() {
+			return "", nil
+		}
+
+		buf, err := km.MarshalText()
+		if err != nil {
+			return "", err
+		}
+
+		return string(buf), nil
+	}
+
+	switch val.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(val.Int(), 10), nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(val.Uint(), 10), nil
+	}
+	return "", fmt.Errorf("unsupported key type: %v", val.Type())
+}
+
+var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+
+func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) {
+	keyVal := reflect.ValueOf(key)
+	var err error
+	switch {
+	// First, if EncodeKeysWithStringer is not enabled, try to decode with KeyUnmarshaler
+	case !mc.EncodeKeysWithStringer && reflect.PtrTo(keyType).Implements(keyUnmarshalerType):
+		keyVal = reflect.New(keyType)
+		v := keyVal.Interface().(KeyUnmarshaler)
+		err = v.UnmarshalKey(key)
+		keyVal = keyVal.Elem()
+	// Try to decode encoding.TextUnmarshalers.
+	case reflect.PtrTo(keyType).Implements(textUnmarshalerType):
+		keyVal = reflect.New(keyType)
+		v := keyVal.Interface().(encoding.TextUnmarshaler)
+		err = v.UnmarshalText([]byte(key))
+		keyVal = keyVal.Elem()
+	// Otherwise, go to type specific behavior
+	default:
+		switch keyType.Kind() {
+		case reflect.String:
+			keyVal = reflect.ValueOf(key).Convert(keyType)
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			n, parseErr := strconv.ParseInt(key, 10, 64)
+			if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) {
+				err = fmt.Errorf("failed to unmarshal number key %v", key)
+			}
+			keyVal = reflect.ValueOf(n).Convert(keyType)
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			n, parseErr := strconv.ParseUint(key, 10, 64)
+			if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) {
+				err = fmt.Errorf("failed to unmarshal number key %v", key)
+				break
+			}
+			keyVal = reflect.ValueOf(n).Convert(keyType)
+		case reflect.Float32, reflect.Float64:
+			if mc.EncodeKeysWithStringer {
+				parsed, err := strconv.ParseFloat(key, 64)
+				if err != nil {
+					return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %w", keyType.Kind(), err)
+				}
+				keyVal = reflect.ValueOf(parsed)
+				break
+			}
+			fallthrough
+		default:
+			return keyVal, fmt.Errorf("unsupported key type: %v", keyType)
+		}
+	}
+	return keyVal, err
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go
new file mode 100644
index 00000000000..fbd9f0a9e97
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go
@@ -0,0 +1,65 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import "fmt"
+
+type mode int
+
+const (
+	_ mode = iota
+	mTopLevel
+	mDocument
+	mArray
+	mValue
+	mElement
+	mCodeWithScope
+	mSpacer
+)
+
+func (m mode) String() string {
+	var str string
+
+	switch m {
+	case mTopLevel:
+		str = "TopLevel"
+	case mDocument:
+		str = "DocumentMode"
+	case mArray:
+		str = "ArrayMode"
+	case mValue:
+		str = "ValueMode"
+	case mElement:
+		str = "ElementMode"
+	case mCodeWithScope:
+		str = "CodeWithScopeMode"
+	case mSpacer:
+		str = "CodeWithScopeSpacerFrame"
+	default:
+		str = "UnknownMode"
+	}
+
+	return str
+}
+
+// TransitionError is an error returned when an invalid transition in a
+// ValueReader or ValueWriter state machine occurs.
+type TransitionError struct {
+	parent      mode
+	current     mode
+	destination mode
+}
+
+func (te TransitionError) Error() string {
+	if te.destination == mode(0) {
+		return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current)
+	}
+	if te.parent == mode(0) {
+		return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination)
+	}
+	return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
new file mode 100644
index 00000000000..ddfa4a33e18
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
@@ -0,0 +1,108 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+var _ ValueEncoder = &PointerCodec{}
+var _ ValueDecoder = &PointerCodec{}
+
+// PointerCodec is the Codec used for pointers.
+//
+// Deprecated: PointerCodec will not be directly accessible in Go Driver 2.0. To
+// override the default pointer encode and decode behavior, create a new registry
+// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new
+// encoder and decoder for pointers.
+//
+// For example,
+//
+//	reg := bson.NewRegistry()
+//	reg.RegisterKindEncoder(reflect.Ptr, myPointerEncoder)
+//	reg.RegisterKindDecoder(reflect.Ptr, myPointerDecoder)
+type PointerCodec struct {
+	ecache typeEncoderCache
+	dcache typeDecoderCache
+}
+
+// NewPointerCodec returns a PointerCodec that has been initialized.
+//
+// Deprecated: NewPointerCodec will not be available in Go Driver 2.0. See
+// [PointerCodec] for more details.
+func NewPointerCodec() *PointerCodec {
+	return &PointerCodec{}
+}
+
+// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil
+// or looking up an encoder for the type of value the pointer points to.
+func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.Kind() != reflect.Ptr {
+		if !val.IsValid() {
+			return vw.WriteNull()
+		}
+		return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	typ := val.Type()
+	if v, ok := pc.ecache.Load(typ); ok {
+		if v == nil {
+			return ErrNoEncoder{Type: typ}
+		}
+		return v.EncodeValue(ec, vw, val.Elem())
+	}
+	// TODO(charlie): handle concurrent requests for the same type
+	enc, err := ec.LookupEncoder(typ.Elem())
+	enc = pc.ecache.LoadOrStore(typ, enc)
+	if err != nil {
+		return err
+	}
+	return enc.EncodeValue(ec, vw, val.Elem())
+}
+
+// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and
+// using that to decode. If the BSON value is Null, this method will set the pointer to nil.
+func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Ptr {
+		return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+	}
+
+	typ := val.Type()
+	if vr.Type() == bsontype.Null {
+		val.Set(reflect.Zero(typ))
+		return vr.ReadNull()
+	}
+	if vr.Type() == bsontype.Undefined {
+		val.Set(reflect.Zero(typ))
+		return vr.ReadUndefined()
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.New(typ.Elem()))
+	}
+
+	if v, ok := pc.dcache.Load(typ); ok {
+		if v == nil {
+			return ErrNoDecoder{Type: typ}
+		}
+		return v.DecodeValue(dc, vr, val.Elem())
+	}
+	// TODO(charlie): handle concurrent requests for the same type
+	dec, err := dc.LookupDecoder(typ.Elem())
+	dec = pc.dcache.LoadOrStore(typ, dec)
+	if err != nil {
+		return err
+	}
+	return dec.DecodeValue(dc, vr, val.Elem())
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
new file mode 100644
index 00000000000..4cf2b01ab48
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types
+// that implement this interface will have ProxyBSON called during the encoding process and that
+// value will be encoded in place for the implementer.
+type Proxy interface {
+	ProxyBSON() (interface{}, error)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
new file mode 100644
index 00000000000..196c491bbbf
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
@@ -0,0 +1,524 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder.
+//
+// Deprecated: ErrNilType will not be supported in Go Driver 2.0.
+var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>")
+
+// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder.
+//
+// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0.
+var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder")
+
+// ErrNoEncoder is returned when there wasn't an encoder available for a type.
+//
+// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0.
+type ErrNoEncoder struct {
+	Type reflect.Type
+}
+
+func (ene ErrNoEncoder) Error() string {
+	if ene.Type == nil {
+		return "no encoder found for <nil>"
+	}
+	return "no encoder found for " + ene.Type.String()
+}
+
+// ErrNoDecoder is returned when there wasn't a decoder available for a type.
+//
+// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0.
+type ErrNoDecoder struct {
+	Type reflect.Type
+}
+
+func (end ErrNoDecoder) Error() string {
+	return "no decoder found for " + end.Type.String()
+}
+
+// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type.
+//
+// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0.
+type ErrNoTypeMapEntry struct {
+	Type bsontype.Type
+}
+
+func (entme ErrNoTypeMapEntry) Error() string {
+	return "no type map entry found for " + entme.Type.String()
+}
+
+// ErrNotInterface is returned when the provided type is not an interface.
+//
+// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0.
+var ErrNotInterface = errors.New("The provided type is not an interface")
+
+// A RegistryBuilder is used to build a Registry. This type is not goroutine
+// safe.
+//
+// Deprecated: Use Registry instead.
+type RegistryBuilder struct {
+	registry *Registry
+}
+
+// NewRegistryBuilder creates a new empty RegistryBuilder.
+//
+// Deprecated: Use NewRegistry instead.
+func NewRegistryBuilder() *RegistryBuilder {
+	return &RegistryBuilder{
+		registry: NewRegistry(),
+	}
+}
+
+// RegisterCodec will register the provided ValueCodec for the provided type.
+//
+// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead.
+func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder {
+	rb.RegisterTypeEncoder(t, codec)
+	rb.RegisterTypeDecoder(t, codec)
+	return rb
+}
+
+// RegisterTypeEncoder will register the provided ValueEncoder for the provided type.
+//
+// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered
+// for a pointer to that type.
+//
+// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It
+// will not be called when marshaling a non-interface type that implements the interface.
+//
+// Deprecated: Use Registry.RegisterTypeEncoder instead.
+func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+	rb.registry.RegisterTypeEncoder(t, enc)
+	return rb
+}
+
+// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when
+// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not
+// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
+//
+// Deprecated: Use Registry.RegisterInterfaceEncoder instead.
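+//
+// For example (a sketch; myMarshalerEncoder is a hypothetical ValueEncoder):
+//
+//	tMarshaler := reflect.TypeOf((*Marshaler)(nil)).Elem()
+//	rb.RegisterHookEncoder(tMarshaler, myMarshalerEncoder)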
+func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+	rb.registry.RegisterInterfaceEncoder(t, enc)
+	return rb
+}
+
+// RegisterTypeDecoder will register the provided ValueDecoder for the provided type.
+//
+// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered
+// for a pointer to that type.
+//
+// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface.
+// It will not be called when unmarshaling into a non-interface type that implements the interface.
+//
+// Deprecated: Use Registry.RegisterTypeDecoder instead.
+func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+	rb.registry.RegisterTypeDecoder(t, dec)
+	return rb
+}
+
+// RegisterHookDecoder will register a decoder for the provided interface type t. This decoder will be called when
+// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not
+// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
+//
+// Deprecated: Use Registry.RegisterInterfaceDecoder instead.
+func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+	rb.registry.RegisterInterfaceDecoder(t, dec)
+	return rb
+}
+
+// RegisterEncoder registers the provided type and encoder pair.
+//
+// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead.
+func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+	if t == tEmpty {
+		rb.registry.RegisterTypeEncoder(t, enc)
+		return rb
+	}
+	switch t.Kind() {
+	case reflect.Interface:
+		rb.registry.RegisterInterfaceEncoder(t, enc)
+	default:
+		rb.registry.RegisterTypeEncoder(t, enc)
+	}
+	return rb
+}
+
+// RegisterDecoder registers the provided type and decoder pair.
+//
+// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead.
+func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+	if t == nil {
+		rb.registry.RegisterTypeDecoder(t, dec)
+		return rb
+	}
+	if t == tEmpty {
+		rb.registry.RegisterTypeDecoder(t, dec)
+		return rb
+	}
+	switch t.Kind() {
+	case reflect.Interface:
+		rb.registry.RegisterInterfaceDecoder(t, dec)
+	default:
+		rb.registry.RegisterTypeDecoder(t, dec)
+	}
+	return rb
+}
+
+// RegisterDefaultEncoder will register the provided ValueEncoder to the provided
+// kind.
+//
+// Deprecated: Use Registry.RegisterKindEncoder instead.
+func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder {
+	rb.registry.RegisterKindEncoder(kind, enc)
+	return rb
+}
+
+// RegisterDefaultDecoder will register the provided ValueDecoder to the
+// provided kind.
+//
+// Deprecated: Use Registry.RegisterKindDecoder instead.
+func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder {
+	rb.registry.RegisterKindDecoder(kind, dec)
+	return rb
+}
+
+// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
+// mapping is decoding situations where an empty interface is used and a default type needs to be
+// created and decoded into.
+//
+// By default, BSON documents will decode into interface{} values as bson.D.
+// To change the default type for BSON documents, a type map entry for bsontype.EmbeddedDocument should be registered.
+// For example, to force BSON documents to decode to bson.Raw, use the following code:
+//
+//	rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
+//
+// Deprecated: Use Registry.RegisterTypeMapEntry instead.
+func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder {
+	rb.registry.RegisterTypeMapEntry(bt, rt)
+	return rb
+}
+
+// Build creates a Registry from the current state of this RegistryBuilder.
+//
+// Deprecated: Use NewRegistry instead.
+func (rb *RegistryBuilder) Build() *Registry {
+	r := &Registry{
+		interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...),
+		interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...),
+		typeEncoders:      rb.registry.typeEncoders.Clone(),
+		typeDecoders:      rb.registry.typeDecoders.Clone(),
+		kindEncoders:      rb.registry.kindEncoders.Clone(),
+		kindDecoders:      rb.registry.kindDecoders.Clone(),
+	}
+	rb.registry.typeMap.Range(func(k, v interface{}) bool {
+		if k != nil && v != nil {
+			r.typeMap.Store(k, v)
+		}
+		return true
+	})
+	return r
+}
+
+// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main
+// type passed around, and Encoders and Decoders are constructed from it.
+type Registry struct {
+	interfaceEncoders []interfaceValueEncoder
+	interfaceDecoders []interfaceValueDecoder
+	typeEncoders      *typeEncoderCache
+	typeDecoders      *typeDecoderCache
+	kindEncoders      *kindEncoderCache
+	kindDecoders      *kindDecoderCache
+	typeMap           sync.Map // map[bsontype.Type]reflect.Type
+}
+
+// NewRegistry creates a new empty Registry.
+func NewRegistry() *Registry {
+	return &Registry{
+		typeEncoders: new(typeEncoderCache),
+		typeDecoders: new(typeDecoderCache),
+		kindEncoders: new(kindEncoderCache),
+		kindDecoders: new(kindDecoderCache),
+	}
+}
+
+// RegisterTypeEncoder registers the provided ValueEncoder for the provided type.
+//
+// The type will be used as provided, so an encoder can be registered for a type and a different
+// encoder can be registered for a pointer to that type.
+//
+// If the given type is an interface, the encoder will be called when marshaling a type that is
+// that interface. It will not be called when marshaling a non-interface type that implements the
+// interface. To get the latter behavior, call RegisterHookEncoder instead.
+//
+// RegisterTypeEncoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) {
+	r.typeEncoders.Store(valueType, enc)
+}
+
+// RegisterTypeDecoder registers the provided ValueDecoder for the provided type.
+//
+// The type will be used as provided, so a decoder can be registered for a type and a different
+// decoder can be registered for a pointer to that type.
+//
+// If the given type is an interface, the decoder will be called when unmarshaling into a type that
+// is that interface. It will not be called when unmarshaling into a non-interface type that
+// implements the interface. To get the latter behavior, call RegisterHookDecoder instead.
+//
+// RegisterTypeDecoder should not be called concurrently with any other Registry method.
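+//
+// For example (a sketch; myTimeDecoder is a hypothetical ValueDecoder):
+//
+//	reg.RegisterTypeDecoder(reflect.TypeOf(time.Time{}), myTimeDecoder)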
+func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) {
+	r.typeDecoders.Store(valueType, dec)
+}
+
+// RegisterKindEncoder registers the provided ValueEncoder for the provided kind.
+//
+// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For
+// example, consider the type MyInt defined as
+//
+//	type MyInt int32
+//
+// To define an encoder for MyInt and int32, use RegisterKindEncoder like
+//
+//	reg.RegisterKindEncoder(reflect.Int32, myEncoder)
+//
+// RegisterKindEncoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) {
+	r.kindEncoders.Store(kind, enc)
+}
+
+// RegisterKindDecoder registers the provided ValueDecoder for the provided kind.
+//
+// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For
+// example, consider the type MyInt defined as
+//
+//	type MyInt int32
+//
+// To define a decoder for MyInt and int32, use RegisterKindDecoder like
+//
+//	reg.RegisterKindDecoder(reflect.Int32, myDecoder)
+//
+// RegisterKindDecoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) {
+	r.kindDecoders.Store(kind, dec)
+}
+
+// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will
+// be called when marshaling a type if the type implements iface or a pointer to the type
+// implements iface. If the provided type is not an interface
+// (i.e. iface.Kind() != reflect.Interface), this method will panic.
+//
+// RegisterInterfaceEncoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) {
+	if iface.Kind() != reflect.Interface {
+		panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+
+			"got type %s with kind %s", iface, iface.Kind())
+		panic(panicStr)
+	}
+
+	for idx, encoder := range r.interfaceEncoders {
+		if encoder.i == iface {
+			r.interfaceEncoders[idx].ve = enc
+			return
+		}
+	}
+
+	r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc})
+}
+
+// RegisterInterfaceDecoder registers a decoder for the provided interface type iface. This decoder will
+// be called when unmarshaling into a type if the type implements iface or a pointer to the type
+// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface),
+// this method will panic.
+//
+// RegisterInterfaceDecoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) {
+	if iface.Kind() != reflect.Interface {
+		panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+
+			"got type %s with kind %s", iface, iface.Kind())
+		panic(panicStr)
+	}
+
+	for idx, decoder := range r.interfaceDecoders {
+		if decoder.i == iface {
+			r.interfaceDecoders[idx].vd = dec
+			return
+		}
+	}
+
+	r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec})
+}
+
+// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
+// mapping is decoding situations where an empty interface is used and a default type needs to be
+// created and decoded into.
+// +// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON +// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents +// to decode to bson.Raw, use the following code: +// +// reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) +func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { + r.typeMap.Store(bt, rt) +} + +// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup +// order: +// +// 1. An encoder registered for the exact type. If the given type is an interface, an encoder +// registered using RegisterTypeEncoder for that interface will be selected. +// +// 2. An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type +// or by a pointer to the type. +// +// 3. An encoder registered using RegisterKindEncoder for the kind of value. +// +// If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for +// concurrent use by multiple goroutines after all codecs and encoders are registered. +func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { + if valueType == nil { + return nil, ErrNoEncoder{Type: valueType} + } + enc, found := r.lookupTypeEncoder(valueType) + if found { + if enc == nil { + return nil, ErrNoEncoder{Type: valueType} + } + return enc, nil + } + + enc, found = r.lookupInterfaceEncoder(valueType, true) + if found { + return r.typeEncoders.LoadOrStore(valueType, enc), nil + } + + if v, ok := r.kindEncoders.Load(valueType.Kind()); ok { + return r.storeTypeEncoder(valueType, v), nil + } + return nil, ErrNoEncoder{Type: valueType} +} + +func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder { + return r.typeEncoders.LoadOrStore(rt, enc) +} + +func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) { + return r.typeEncoders.Load(rt) +} + +func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) { + if valueType == nil { + return nil, false + } + for _, ienc := range r.interfaceEncoders { + if valueType.Implements(ienc.i) { + return ienc.ve, true + } + if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) { + // if *t implements an interface, this will catch if t implements an interface further + // ahead in interfaceEncoders + defaultEnc, found := r.lookupInterfaceEncoder(valueType, false) + if !found { + defaultEnc, _ = r.kindEncoders.Load(valueType.Kind()) + } + return newCondAddrEncoder(ienc.ve, defaultEnc), true + } + } + return nil, false +} + +// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup +// order: +// +// 1. A decoder registered for the exact type. If the given type is an interface, a decoder +// registered using RegisterTypeDecoder for that interface will be selected. +// +// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by +// a pointer to the type. +// +// 3. A decoder registered using RegisterKindDecoder for the kind of value. +// +// If no decoder is found, an error of type ErrNoDecoder is returned. LookupDecoder is safe for +// concurrent use by multiple goroutines after all codecs and decoders are registered. 
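+//
+// For example (a sketch):
+//
+//	dec, err := reg.LookupDecoder(reflect.TypeOf(int64(0)))
+//	if err != nil {
+//		// no decoder is registered for int64 or for kind reflect.Int64
+//	}
+//	_ = dec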
+func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { + if valueType == nil { + return nil, ErrNilType + } + dec, found := r.lookupTypeDecoder(valueType) + if found { + if dec == nil { + return nil, ErrNoDecoder{Type: valueType} + } + return dec, nil + } + + dec, found = r.lookupInterfaceDecoder(valueType, true) + if found { + return r.storeTypeDecoder(valueType, dec), nil + } + + if v, ok := r.kindDecoders.Load(valueType.Kind()); ok { + return r.storeTypeDecoder(valueType, v), nil + } + return nil, ErrNoDecoder{Type: valueType} +} + +func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) { + return r.typeDecoders.Load(valueType) +} + +func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder { + return r.typeDecoders.LoadOrStore(typ, dec) +} + +func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) { + for _, idec := range r.interfaceDecoders { + if valueType.Implements(idec.i) { + return idec.vd, true + } + if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) { + // if *t implements an interface, this will catch if t implements an interface further + // ahead in interfaceDecoders + defaultDec, found := r.lookupInterfaceDecoder(valueType, false) + if !found { + defaultDec, _ = r.kindDecoders.Load(valueType.Kind()) + } + return newCondAddrDecoder(idec.vd, defaultDec), true + } + } + return nil, false +} + +// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON +// type. If no type is found, ErrNoTypeMapEntry is returned. +// +// LookupTypeMapEntry should not be called concurrently with any other Registry method. +func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { + v, ok := r.typeMap.Load(bt) + if v == nil || !ok { + return nil, ErrNoTypeMapEntry{Type: bt} + } + return v.(reflect.Type), nil +} + +type interfaceValueEncoder struct { + i reflect.Type + ve ValueEncoder +} + +type interfaceValueDecoder struct { + i reflect.Type + vd ValueDecoder +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go new file mode 100644 index 00000000000..14c9fd25646 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go @@ -0,0 +1,214 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "errors" + "fmt" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +var defaultSliceCodec = NewSliceCodec() + +// SliceCodec is the Codec used for slice values. +// +// Deprecated: SliceCodec will not be directly configurable in Go Driver 2.0. To +// configure the slice encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the slice encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. 
+//
+// For example, to configure a mongo.Client to marshal nil Go slices as empty
+// BSON arrays, use:
+//
+//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
+//		NilSliceAsEmpty: true,
+//	})
+//
+// See the deprecation notice for each field in SliceCodec for the corresponding
+// settings.
+type SliceCodec struct {
+	// EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of
+	// BSON null.
+	//
+	// Deprecated: Use bson.Encoder.NilSliceAsEmpty instead.
+	EncodeNilAsEmpty bool
+}
+
+// NewSliceCodec returns a SliceCodec with options opts.
+//
+// Deprecated: NewSliceCodec will not be available in Go Driver 2.0. See
+// [SliceCodec] for more details.
+func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec {
+	sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...)
+
+	codec := SliceCodec{}
+	if sliceOpt.EncodeNilAsEmpty != nil {
+		codec.EncodeNilAsEmpty = *sliceOpt.EncodeNilAsEmpty
+	}
+	return &codec
+}
+
+// EncodeValue is the ValueEncoder for slice types.
+func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Slice {
+		return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+	}
+
+	if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty {
+		return vw.WriteNull()
+	}
+
+	// If we have a []byte we want to treat it as a binary instead of as an array.
+	if val.Type().Elem() == tByte {
+		byteSlice := make([]byte, val.Len())
+		reflect.Copy(reflect.ValueOf(byteSlice), val)
+		return vw.WriteBinary(byteSlice)
+	}
+
+	// If we have a []primitive.E we want to treat it as a document instead of as an array.
+	if val.Type() == tD || val.Type().ConvertibleTo(tD) {
+		d := val.Convert(tD).Interface().(primitive.D)
+
+		dw, err := vw.WriteDocument()
+		if err != nil {
+			return err
+		}
+
+		for _, e := range d {
+			err = encodeElement(ec, dw, e)
+			if err != nil {
+				return err
+			}
+		}
+
+		return dw.WriteDocumentEnd()
+	}
+
+	aw, err := vw.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	elemType := val.Type().Elem()
+	encoder, err := ec.LookupEncoder(elemType)
+	if err != nil && elemType.Kind() != reflect.Interface {
+		return err
+	}
+
+	for idx := 0; idx < val.Len(); idx++ {
+		currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx))
+		if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+			return lookupErr
+		}
+
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		if errors.Is(lookupErr, errInvalidValue) {
+			err = vw.WriteNull()
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		err = currEncoder.EncodeValue(ec, vw, currVal)
+		if err != nil {
+			return err
+		}
+	}
+	return aw.WriteArrayEnd()
+}
+
+// DecodeValue is the ValueDecoder for slice types.
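+// BSON null and undefined decode to a nil slice. BSON binary (subtype 0x00 or
+// 0x02) and BSON string values decode only into byte slices. (A summary of the
+// method body below.)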
+func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Slice { + return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Array: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Undefined: + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() + case bsontype.Type(0), bsontype.EmbeddedDocument: + if val.Type().Elem() != tE { + return fmt.Errorf("cannot decode document into %s", val.Type()) + } + case bsontype.Binary: + if val.Type().Elem() != tByte { + return fmt.Errorf("SliceDecodeValue can only decode a binary into a byte array, got %v", vrType) + } + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return fmt.Errorf("SliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(data))) + } + val.SetLen(0) + val.Set(reflect.AppendSlice(val, reflect.ValueOf(data))) + return nil + case bsontype.String: + if sliceType := val.Type().Elem(); sliceType != tByte { + return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", sliceType) + } + str, err := vr.ReadString() + if err != nil { + return err + } + byteStr := []byte(str) + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr))) + } + val.SetLen(0) + val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr))) + return nil + default: + return fmt.Errorf("cannot decode %v into a slice", vrType) + } + + var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) + switch val.Type().Elem() { + case tE: + dc.Ancestor = val.Type() + elemsFunc = defaultValueDecoders.decodeD + default: + elemsFunc = defaultValueDecoders.decodeDefault + } + + elems, err := elemsFunc(dc, vr, val) + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) + } + + val.SetLen(0) + val.Set(reflect.Append(val, elems...)) + + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go new file mode 100644 index 00000000000..a8f885a854f --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go @@ -0,0 +1,140 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// StringCodec is the Codec used for string values. +// +// Deprecated: StringCodec will not be directly accessible in Go Driver 2.0. To +// override the default string encode and decode behavior, create a new registry +// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new +// encoder and decoder for strings. 
+// +// For example, +// +// reg := bson.NewRegistry() +// reg.RegisterKindEncoder(reflect.String, myStringEncoder) +// reg.RegisterKindDecoder(reflect.String, myStringDecoder) +type StringCodec struct { + // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. + // If false, a string made from the raw object ID bytes will be used. Defaults to true. + // + // Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. + DecodeObjectIDAsHex bool +} + +var ( + defaultStringCodec = NewStringCodec() + + // Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be + // used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. + _ typeDecoder = defaultStringCodec +) + +// NewStringCodec returns a StringCodec with options opts. +// +// Deprecated: NewStringCodec will not be available in Go Driver 2.0. See +// [StringCodec] for more details. +func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { + stringOpt := bsonoptions.MergeStringCodecOptions(opts...) + return &StringCodec{*stringOpt.DecodeObjectIDAsHex} +} + +// EncodeValue is the ValueEncoder for string types. +func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.Kind() != reflect.String { + return ValueEncoderError{ + Name: "StringEncodeValue", + Kinds: []reflect.Kind{reflect.String}, + Received: val, + } + } + + return vw.WriteString(val.String()) +} + +func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t.Kind() != reflect.String { + return emptyValue, ValueDecoderError{ + Name: "StringDecodeValue", + Kinds: []reflect.Kind{reflect.String}, + Received: reflect.Zero(t), + } + } + + var str string + var err error + switch vr.Type() { + case bsontype.String: + str, err = vr.ReadString() + if err != nil { + return emptyValue, err + } + case bsontype.ObjectID: + oid, err := vr.ReadObjectID() + if err != nil { + return emptyValue, err + } + if sc.DecodeObjectIDAsHex { + str = oid.Hex() + } else { + // TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string. + byteArray := [12]byte(oid) + str = string(byteArray[:]) + } + case bsontype.Symbol: + str, err = vr.ReadSymbol() + if err != nil { + return emptyValue, err + } + case bsontype.Binary: + data, subtype, err := vr.ReadBinary() + if err != nil { + return emptyValue, err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return emptyValue, decodeBinaryError{subtype: subtype, typeName: "string"} + } + str = string(data) + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into a string type", vr.Type()) + } + + return reflect.ValueOf(str), nil +} + +// DecodeValue is the ValueDecoder for string types. 
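+// Besides BSON strings, it accepts ObjectID (rendered as hex by default),
+// symbol, binary subtype 0x00/0x02, null, and undefined values, per decodeType
+// above; any other BSON type returns an error.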
+func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.String {
+		return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val}
+	}
+
+	elem, err := sc.decodeType(dctx, vr, val.Type())
+	if err != nil {
+		return err
+	}
+
+	val.SetString(elem.String())
+	return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
new file mode 100644
index 00000000000..f8d9690c139
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
@@ -0,0 +1,736 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// DecodeError represents an error that occurs when unmarshalling BSON bytes into a native Go type.
+type DecodeError struct {
+	keys    []string
+	wrapped error
+}
+
+// Unwrap returns the underlying error
+func (de *DecodeError) Unwrap() error {
+	return de.wrapped
+}
+
+// Error implements the error interface.
+func (de *DecodeError) Error() string {
+	// The keys are stored in reverse order because the de.keys slice is built up while propagating the error up the
+	// stack of BSON keys, so we call de.Keys(), which reverses them.
+	keyPath := strings.Join(de.Keys(), ".")
+	return fmt.Sprintf("error decoding key %s: %v", keyPath, de.wrapped)
+}
+
+// Keys returns the BSON key path that caused an error as a slice of strings. The keys in the slice are in top-down
+// order. For example, if the document being unmarshalled was {a: {b: {c: 1}}} and the value for c was supposed to be
+// a string, the keys slice will be ["a", "b", "c"].
+func (de *DecodeError) Keys() []string {
+	reversedKeys := make([]string, 0, len(de.keys))
+	for idx := len(de.keys) - 1; idx >= 0; idx-- {
+		reversedKeys = append(reversedKeys, de.keys[idx])
+	}
+
+	return reversedKeys
+}
+
+// Zeroer allows custom struct types to implement a report of zero
+// state. All struct types that don't implement Zeroer or where IsZero
+// returns false are considered to be not zero.
+type Zeroer interface {
+	IsZero() bool
+}
+
+// StructCodec is the Codec used for struct values.
+//
+// Deprecated: StructCodec will not be directly configurable in Go Driver 2.0.
+// To configure the struct encode and decode behavior, use the configuration
+// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the struct encode
+// and decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to omit zero-value structs when
+// using the "omitempty" struct tag, use:
+//
+//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
+//		OmitZeroStruct: true,
+//	})
+//
+// See the deprecation notice for each field in StructCodec for the corresponding
+// settings.
+type StructCodec struct { + cache sync.Map // map[reflect.Type]*structDescription + parser StructTagParser + + // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the + // destination value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: Use bson.Decoder.ZeroStructs or options.BSONOptions.ZeroStructs instead. + DecodeZeroStruct bool + + // DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the + // destination value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. + DecodeDeepZeroInline bool + + // EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g. + // MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag + // option is set. + // + // Deprecated: Use bson.Encoder.OmitZeroStruct or options.BSONOptions.OmitZeroStruct instead. + EncodeOmitDefaultStruct bool + + // AllowUnexportedFields allows encoding and decoding values from un-exported struct fields. + // + // Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be + // supported in Go Driver 2.0. + AllowUnexportedFields bool + + // OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is + // a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The + // default value is true. + // + // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates or + // options.BSONOptions.ErrorOnInlineDuplicates instead. + OverwriteDuplicatedInlinedFields bool +} + +var _ ValueEncoder = &StructCodec{} +var _ ValueDecoder = &StructCodec{} + +// NewStructCodec returns a StructCodec that uses p for struct tag parsing. +// +// Deprecated: NewStructCodec will not be available in Go Driver 2.0. See +// [StructCodec] for more details. +func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { + if p == nil { + return nil, errors.New("a StructTagParser must be provided to NewStructCodec") + } + + structOpt := bsonoptions.MergeStructCodecOptions(opts...) + + codec := &StructCodec{ + parser: p, + } + + if structOpt.DecodeZeroStruct != nil { + codec.DecodeZeroStruct = *structOpt.DecodeZeroStruct + } + if structOpt.DecodeDeepZeroInline != nil { + codec.DecodeDeepZeroInline = *structOpt.DecodeDeepZeroInline + } + if structOpt.EncodeOmitDefaultStruct != nil { + codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct + } + if structOpt.OverwriteDuplicatedInlinedFields != nil { + codec.OverwriteDuplicatedInlinedFields = *structOpt.OverwriteDuplicatedInlinedFields + } + if structOpt.AllowUnexportedFields != nil { + codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields + } + + return codec, nil +} + +// EncodeValue handles encoding generic struct types. 
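+// Fields tagged "omitempty" are skipped when empty, "inline" struct fields are
+// flattened into the parent document, and a declared inline map is encoded
+// last with a collision check against the named fields (see the body below).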
+func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Struct { + return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} + } + + sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates) + if err != nil { + return err + } + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + var rv reflect.Value + for _, desc := range sd.fl { + if desc.inline == nil { + rv = val.Field(desc.idx) + } else { + rv, err = fieldByIndexErr(val, desc.inline) + if err != nil { + continue + } + } + + desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv) + + if err != nil && !errors.Is(err, errInvalidValue) { + return err + } + + if errors.Is(err, errInvalidValue) { + if desc.omitEmpty { + continue + } + vw2, err := dw.WriteDocumentElement(desc.name) + if err != nil { + return err + } + err = vw2.WriteNull() + if err != nil { + return err + } + continue + } + + if desc.encoder == nil { + return ErrNoEncoder{Type: rv.Type()} + } + + encoder := desc.encoder + + var empty bool + if cz, ok := encoder.(CodecZeroer); ok { + empty = cz.IsTypeZero(rv.Interface()) + } else if rv.Kind() == reflect.Interface { + // isEmpty will not treat an interface rv as an interface, so we need to check for the + // nil interface separately. + empty = rv.IsNil() + } else { + empty = isEmpty(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) + } + if desc.omitEmpty && empty { + continue + } + + vw2, err := dw.WriteDocumentElement(desc.name) + if err != nil { + return err + } + + ectx := EncodeContext{ + Registry: ec.Registry, + MinSize: desc.minSize || ec.MinSize, + errorOnInlineDuplicates: ec.errorOnInlineDuplicates, + stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt, + nilMapAsEmpty: ec.nilMapAsEmpty, + nilSliceAsEmpty: ec.nilSliceAsEmpty, + nilByteSliceAsEmpty: ec.nilByteSliceAsEmpty, + omitZeroStruct: ec.omitZeroStruct, + useJSONStructTags: ec.useJSONStructTags, + } + err = encoder.EncodeValue(ectx, vw2, rv) + if err != nil { + return err + } + } + + if sd.inlineMap >= 0 { + rv := val.Field(sd.inlineMap) + collisionFn := func(key string) bool { + _, exists := sd.fm[key] + return exists + } + + return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn) + } + + return dw.WriteDocumentEnd() +} + +func newDecodeError(key string, original error) error { + var de *DecodeError + if !errors.As(original, &de) { + return &DecodeError{ + keys: []string{key}, + wrapped: original, + } + } + + de.keys = append(de.keys, key) + return de +} + +// DecodeValue implements the Codec interface. +// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. +// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. 
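+// BSON null and undefined reset the struct to its zero value. Unknown keys are
+// stored in the inline map when one is declared and are skipped otherwise.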
+func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Struct { + return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Type(0), bsontype.EmbeddedDocument: + case bsontype.Null: + if err := vr.ReadNull(); err != nil { + return err + } + + val.Set(reflect.Zero(val.Type())) + return nil + case bsontype.Undefined: + if err := vr.ReadUndefined(); err != nil { + return err + } + + val.Set(reflect.Zero(val.Type())) + return nil + default: + return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) + } + + sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false) + if err != nil { + return err + } + + if sc.DecodeZeroStruct || dc.zeroStructs { + val.Set(reflect.Zero(val.Type())) + } + if sc.DecodeDeepZeroInline && sd.inline { + val.Set(deepZero(val.Type())) + } + + var decoder ValueDecoder + var inlineMap reflect.Value + if sd.inlineMap >= 0 { + inlineMap = val.Field(sd.inlineMap) + decoder, err = dc.LookupDecoder(inlineMap.Type().Elem()) + if err != nil { + return err + } + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + for { + name, vr, err := dr.ReadElement() + if errors.Is(err, bsonrw.ErrEOD) { + break + } + if err != nil { + return err + } + + fd, exists := sd.fm[name] + if !exists { + // if the original name isn't found in the struct description, try again with the name in lowercase + // this could match if a BSON tag isn't specified because by default, describeStruct lowercases all field + // names + fd, exists = sd.fm[strings.ToLower(name)] + } + + if !exists { + if sd.inlineMap < 0 { + // The encoding/json package requires a flag to return on error for non-existent fields. + // This functionality seems appropriate for the struct codec. + err = vr.Skip() + if err != nil { + return err + } + continue + } + + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + + elem := reflect.New(inlineMap.Type().Elem()).Elem() + dc.Ancestor = inlineMap.Type() + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + inlineMap.SetMapIndex(reflect.ValueOf(name), elem) + continue + } + + var field reflect.Value + if fd.inline == nil { + field = val.Field(fd.idx) + } else { + field, err = getInlineField(val, fd.inline) + if err != nil { + return err + } + } + + if !field.CanSet() { // Being settable is a super set of being addressable. 
+ innerErr := fmt.Errorf("field %v is not settable", field) + return newDecodeError(fd.name, innerErr) + } + if field.Kind() == reflect.Ptr && field.IsNil() { + field.Set(reflect.New(field.Type().Elem())) + } + field = field.Addr() + + dctx := DecodeContext{ + Registry: dc.Registry, + Truncate: fd.truncate || dc.Truncate, + defaultDocumentType: dc.defaultDocumentType, + binaryAsSlice: dc.binaryAsSlice, + useJSONStructTags: dc.useJSONStructTags, + useLocalTimeZone: dc.useLocalTimeZone, + zeroMaps: dc.zeroMaps, + zeroStructs: dc.zeroStructs, + } + + if fd.decoder == nil { + return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()}) + } + + err = fd.decoder.DecodeValue(dctx, vr, field.Elem()) + if err != nil { + return newDecodeError(fd.name, err) + } + } + + return nil +} + +func isEmpty(v reflect.Value, omitZeroStruct bool) bool { + kind := v.Kind() + if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) { + return v.Interface().(Zeroer).IsZero() + } + switch kind { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Struct: + if !omitZeroStruct { + return false + } + vt := v.Type() + if vt == tTime { + return v.Interface().(time.Time).IsZero() + } + numField := vt.NumField() + for i := 0; i < numField; i++ { + ff := vt.Field(i) + if ff.PkgPath != "" && !ff.Anonymous { + continue // Private field + } + if !isEmpty(v.Field(i), omitZeroStruct) { + return false + } + } + return true + } + return !v.IsValid() || v.IsZero() +} + +type structDescription struct { + fm map[string]fieldDescription + fl []fieldDescription + inlineMap int + inline bool +} + +type fieldDescription struct { + name string // BSON key name + fieldName string // struct field name + idx int + omitEmpty bool + minSize bool + truncate bool + inline []int + encoder ValueEncoder + decoder ValueDecoder +} + +type byIndex []fieldDescription + +func (bi byIndex) Len() int { return len(bi) } + +func (bi byIndex) Swap(i, j int) { bi[i], bi[j] = bi[j], bi[i] } + +func (bi byIndex) Less(i, j int) bool { + // If a field is inlined, its index in the top level struct is stored at inline[0] + iIdx, jIdx := bi[i].idx, bi[j].idx + if len(bi[i].inline) > 0 { + iIdx = bi[i].inline[0] + } + if len(bi[j].inline) > 0 { + jIdx = bi[j].inline[0] + } + if iIdx != jIdx { + return iIdx < jIdx + } + for k, biik := range bi[i].inline { + if k >= len(bi[j].inline) { + return false + } + if biik != bi[j].inline[k] { + return biik < bi[j].inline[k] + } + } + return len(bi[i].inline) < len(bi[j].inline) +} + +func (sc *StructCodec) describeStruct( + r *Registry, + t reflect.Type, + useJSONStructTags bool, + errorOnDuplicates bool, +) (*structDescription, error) { + // We need to analyze the struct, including getting the tags, collecting + // information about inlining, and create a map of the field name to the field. + if v, ok := sc.cache.Load(t); ok { + return v.(*structDescription), nil + } + // TODO(charlie): Only describe the struct once when called + // concurrently with the same type. 
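+	// Until then, concurrent callers may each build a description, but the
+	// LoadOrStore below keeps whichever copy was stored first, so later
+	// lookups stay consistent.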
+	ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates)
+	if err != nil {
+		return nil, err
+	}
+	if v, loaded := sc.cache.LoadOrStore(t, ds); loaded {
+		ds = v.(*structDescription)
+	}
+	return ds, nil
+}
+
+func (sc *StructCodec) describeStructSlow(
+	r *Registry,
+	t reflect.Type,
+	useJSONStructTags bool,
+	errorOnDuplicates bool,
+) (*structDescription, error) {
+	numFields := t.NumField()
+	sd := &structDescription{
+		fm:        make(map[string]fieldDescription, numFields),
+		fl:        make([]fieldDescription, 0, numFields),
+		inlineMap: -1,
+	}
+
+	var fields []fieldDescription
+	for i := 0; i < numFields; i++ {
+		sf := t.Field(i)
+		if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) {
+			// field is private or unexported fields aren't allowed, ignore
+			continue
+		}
+
+		sfType := sf.Type
+		encoder, err := r.LookupEncoder(sfType)
+		if err != nil {
+			encoder = nil
+		}
+		decoder, err := r.LookupDecoder(sfType)
+		if err != nil {
+			decoder = nil
+		}
+
+		description := fieldDescription{
+			fieldName: sf.Name,
+			idx:       i,
+			encoder:   encoder,
+			decoder:   decoder,
+		}
+
+		var stags StructTags
+		// If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser
+		// instead of the parser defined on the codec.
+		if useJSONStructTags {
+			stags, err = JSONFallbackStructTagParser.ParseStructTags(sf)
+		} else {
+			stags, err = sc.parser.ParseStructTags(sf)
+		}
+		if err != nil {
+			return nil, err
+		}
+		if stags.Skip {
+			continue
+		}
+		description.name = stags.Name
+		description.omitEmpty = stags.OmitEmpty
+		description.minSize = stags.MinSize
+		description.truncate = stags.Truncate
+
+		if stags.Inline {
+			sd.inline = true
+			switch sfType.Kind() {
+			case reflect.Map:
+				if sd.inlineMap >= 0 {
+					return nil, errors.New("(struct " + t.String() + ") multiple inline maps")
+				}
+				if sfType.Key() != tString {
+					return nil, errors.New("(struct " + t.String() + ") inline map must have string keys")
+				}
+				sd.inlineMap = description.idx
+			case reflect.Ptr:
+				sfType = sfType.Elem()
+				if sfType.Kind() != reflect.Struct {
+					return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String())
+				}
+				fallthrough
+			case reflect.Struct:
+				inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates)
+				if err != nil {
+					return nil, err
+				}
+				for _, fd := range inlinesf.fl {
+					if fd.inline == nil {
+						fd.inline = []int{i, fd.idx}
+					} else {
+						fd.inline = append([]int{i}, fd.inline...)
+					}
+					fields = append(fields, fd)
+				}
+			default:
+				return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String())
+			}
+			continue
+		}
+		fields = append(fields, description)
+	}
+
+	// Sort fieldDescriptions by name and use dominance rules to determine which should be added for each name
+	sort.Slice(fields, func(i, j int) bool {
+		x := fields
+		// sort field by name, breaking ties with depth, then
+		// breaking ties with index sequence.
+		if x[i].name != x[j].name {
+			return x[i].name < x[j].name
+		}
+		if len(x[i].inline) != len(x[j].inline) {
+			return len(x[i].inline) < len(x[j].inline)
+		}
+		return byIndex(x).Less(i, j)
+	})
+
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			sd.fl = append(sd.fl, fi)
+			sd.fm[name] = fi
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates {
+			return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name)
+		}
+		sd.fl = append(sd.fl, dominant)
+		sd.fm[name] = dominant
+	}
+
+	sort.Sort(byIndex(sd.fl))
+
+	return sd, nil
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's inlining rules. If there are multiple top-level
+// fields, the boolean will be false: This condition is an error in Go
+// and we skip all the fields.
+func dominantField(fields []fieldDescription) (fieldDescription, bool) {
+	// The fields are sorted in increasing index-length order, then by presence of tag.
+	// That means that the first field is the dominant one. We need only check
+	// for error cases: two fields at top level.
+	if len(fields) > 1 &&
+		len(fields[0].inline) == len(fields[1].inline) {
+		return fieldDescription{}, false
+	}
+	return fields[0], true
+}
+
+func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) {
+	defer func() {
+		if recovered := recover(); recovered != nil {
+			switch r := recovered.(type) {
+			case string:
+				err = fmt.Errorf("%s", r)
+			case error:
+				err = r
+			}
+		}
+	}()
+
+	result = v.FieldByIndex(index)
+	return
+}
+
+func getInlineField(val reflect.Value, index []int) (reflect.Value, error) {
+	field, err := fieldByIndexErr(val, index)
+	if err == nil {
+		return field, nil
+	}
+
+	// if parent of this element doesn't exist, fix its parent
+	inlineParent := index[:len(index)-1]
+	var fParent reflect.Value
+	if fParent, err = fieldByIndexErr(val, inlineParent); err != nil {
+		fParent, err = getInlineField(val, inlineParent)
+		if err != nil {
+			return fParent, err
+		}
+	}
+	fParent.Set(reflect.New(fParent.Type().Elem()))
+
+	return fieldByIndexErr(val, index)
+}
+
+// deepZero returns a zero value for st whose nested struct fields are also
+// recursively initialized to their zero values.
+func deepZero(st reflect.Type) (result reflect.Value) {
+	if st.Kind() == reflect.Struct {
+		numField := st.NumField()
+		for i := 0; i < numField; i++ {
+			if result == emptyValue {
+				result = reflect.Indirect(reflect.New(st))
+			}
+			f := result.Field(i)
+			if f.CanInterface() {
+				if f.Type().Kind() == reflect.Struct {
+					result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem())))
+				}
+			}
+		}
+	}
+	return result
+}
+
+// recursivePointerTo calls reflect.New(v.Type()) but recursively for its fields inside
+func recursivePointerTo(v reflect.Value) reflect.Value {
+	v = reflect.Indirect(v)
+	result := reflect.New(v.Type())
+	if v.Kind() == reflect.Struct {
+		for i := 0; i < v.NumField(); i++ {
+			if f := v.Field(i); f.Kind() == reflect.Ptr {
+				if f.Elem().Kind() == reflect.Struct {
+					result.Elem().Field(i).Set(recursivePointerTo(f))
+				}
+			}
+		}
+	}
+
+	return result
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go
new file mode 100644
index 00000000000..18d85bfb031
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go
@@ -0,0 +1,148 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+	"strings"
+)
+
+// StructTagParser returns the struct tags for a given struct field.
+//
+// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
+type StructTagParser interface {
+	ParseStructTags(reflect.StructField) (StructTags, error)
+}
+
+// StructTagParserFunc is an adapter that allows a generic function to be used
+// as a StructTagParser.
+//
+// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
+type StructTagParserFunc func(reflect.StructField) (StructTags, error)
+
+// ParseStructTags implements the StructTagParser interface.
+func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) {
+	return stpf(sf)
+}
+
+// StructTags represents the struct tag fields that the StructCodec uses during
+// the encoding and decoding process.
+//
+// In the case of a struct, the lowercased field name is used as the key for each exported
+// field but this behavior may be changed using a struct tag. The tag may also contain flags to
+// adjust the marshalling behavior for the field.
+//
+// The properties are defined below:
+//
+//	OmitEmpty	Only include the field if it's not set to the zero value for the type or to
+//			empty slices or maps.
+//
+//	MinSize		Marshal an integer of a type larger than 32 bits as an int32, if that's
+//			feasible while preserving the numeric value.
+//
+//	Truncate	When unmarshaling a BSON double, it is permitted to lose precision to fit within
+//			a float32.
+//
+//	Inline		Inline the field, which must be a struct or a map, causing all of its fields
+//			or keys to be processed as if they were part of the outer struct. For maps,
+//			keys must not conflict with the bson keys of other struct fields.
+//
+//	Skip		This struct field should be skipped. This is usually denoted by parsing a "-"
+//			for the name.
+//
+// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
+type StructTags struct {
+	Name      string
+	OmitEmpty bool
+	MinSize   bool
+	Truncate  bool
+	Inline    bool
+	Skip      bool
+}
+
+// DefaultStructTagParser is the StructTagParser used by the StructCodec by default.
+// It will handle the bson struct tag. See the documentation for StructTags to see
+// what each of the returned fields means.
+//
+// If there is no name in the struct tag fields, the struct field name is lowercased.
+// The tag formats accepted are:
+//
+//	"[<key>][,<flag1>[,<flag2>]]"
+//
+//	`(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// An example:
+//
+//	type T struct {
+//		A bool
+//		B int    "myb"
+//		C string "myc,omitempty"
+//		D string `bson:",omitempty" json:"jsonkey"`
+//		E int64  ",minsize"
+//		F int64  "myf,omitempty,minsize"
+//	}
+//
+// A struct tag either consisting entirely of '-' or with a bson key with a
+// value consisting entirely of '-' will return a StructTags with Skip true and
+// the remaining fields will be their default values.
+//
+// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0.
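+//
+// For instance, field D in the example above parses to
+// StructTags{Name: "d", OmitEmpty: true}: the name part of its tag is empty,
+// so the key falls back to the lowercased field name.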
+var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { + key := strings.ToLower(sf.Name) + tag, ok := sf.Tag.Lookup("bson") + if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { + tag = string(sf.Tag) + } + return parseTags(key, tag) +} + +func parseTags(key string, tag string) (StructTags, error) { + var st StructTags + if tag == "-" { + st.Skip = true + return st, nil + } + + for idx, str := range strings.Split(tag, ",") { + if idx == 0 && str != "" { + key = str + } + switch str { + case "omitempty": + st.OmitEmpty = true + case "minsize": + st.MinSize = true + case "truncate": + st.Truncate = true + case "inline": + st.Inline = true + } + } + + st.Name = key + + return st, nil +} + +// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser +// but will also fallback to parsing the json tag instead on a field where the +// bson tag isn't available. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and +// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. +var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { + key := strings.ToLower(sf.Name) + tag, ok := sf.Tag.Lookup("bson") + if !ok { + tag, ok = sf.Tag.Lookup("json") + } + if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { + tag = string(sf.Tag) + } + + return parseTags(key, tag) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go new file mode 100644 index 00000000000..22fb762c415 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go @@ -0,0 +1,151 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "reflect" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +const ( + timeFormatString = "2006-01-02T15:04:05.999Z07:00" +) + +// TimeCodec is the Codec used for time.Time values. +// +// Deprecated: TimeCodec will not be directly configurable in Go Driver 2.0. +// To configure the time.Time encode and decode behavior, use the configuration +// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the time.Time encode +// and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to ..., use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// UseLocalTimeZone: true, +// }) +// +// See the deprecation notice for each field in TimeCodec for the corresponding +// settings. +type TimeCodec struct { + // UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. + // + // Deprecated: Use bson.Decoder.UseLocalTimeZone or options.BSONOptions.UseLocalTimeZone + // instead. 
+	UseLocalTimeZone bool
+}
+
+var (
+	defaultTimeCodec = NewTimeCodec()
+
+	// Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used
+	// by collection type decoders (e.g. map, slice, etc) to set individual values in a collection.
+	_ typeDecoder = defaultTimeCodec
+)
+
+// NewTimeCodec returns a TimeCodec with options opts.
+//
+// Deprecated: NewTimeCodec will not be available in Go Driver 2.0. See
+// [TimeCodec] for more details.
+func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec {
+	timeOpt := bsonoptions.MergeTimeCodecOptions(opts...)
+
+	codec := TimeCodec{}
+	if timeOpt.UseLocalTimeZone != nil {
+		codec.UseLocalTimeZone = *timeOpt.UseLocalTimeZone
+	}
+	return &codec
+}
+
+func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tTime {
+		return emptyValue, ValueDecoderError{
+			Name:     "TimeDecodeValue",
+			Types:    []reflect.Type{tTime},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var timeVal time.Time
+	switch vrType := vr.Type(); vrType {
+	case bsontype.DateTime:
+		dt, err := vr.ReadDateTime()
+		if err != nil {
+			return emptyValue, err
+		}
+		timeVal = time.Unix(dt/1000, dt%1000*1000000)
+	case bsontype.String:
+		// assume strings are in the timeFormatString format
+		timeStr, err := vr.ReadString()
+		if err != nil {
+			return emptyValue, err
+		}
+		timeVal, err = time.Parse(timeFormatString, timeStr)
+		if err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Int64:
+		i64, err := vr.ReadInt64()
+		if err != nil {
+			return emptyValue, err
+		}
+		timeVal = time.Unix(i64/1000, i64%1000*1000000)
+	case bsontype.Timestamp:
+		t, _, err := vr.ReadTimestamp()
+		if err != nil {
+			return emptyValue, err
+		}
+		timeVal = time.Unix(int64(t), 0)
+	case bsontype.Null:
+		if err := vr.ReadNull(); err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Undefined:
+		if err := vr.ReadUndefined(); err != nil {
+			return emptyValue, err
+		}
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType)
+	}
+
+	if !tc.UseLocalTimeZone && !dc.useLocalTimeZone {
+		timeVal = timeVal.UTC()
+	}
+	return reflect.ValueOf(timeVal), nil
+}
+
+// DecodeValue is the ValueDecoderFunc for time.Time.
+func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tTime {
+		return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+
+	elem, err := tc.decodeType(dc, vr, tTime)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+// EncodeValue is the ValueEncoderFunc for time.Time.
+func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tTime {
+		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+	tt := val.Interface().(time.Time)
+	dt := primitive.NewDateTimeFromTime(tt)
+	return vw.WriteDateTime(int64(dt))
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
new file mode 100644
index 00000000000..6ade17b7d3f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
@@ -0,0 +1,58 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "net/url" + "reflect" + "time" + + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var tBool = reflect.TypeOf(false) +var tFloat64 = reflect.TypeOf(float64(0)) +var tInt32 = reflect.TypeOf(int32(0)) +var tInt64 = reflect.TypeOf(int64(0)) +var tString = reflect.TypeOf("") +var tTime = reflect.TypeOf(time.Time{}) + +var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() +var tByteSlice = reflect.TypeOf([]byte(nil)) +var tByte = reflect.TypeOf(byte(0x00)) +var tURL = reflect.TypeOf(url.URL{}) +var tJSONNumber = reflect.TypeOf(json.Number("")) + +var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem() +var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() +var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() +var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() +var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem() + +var tBinary = reflect.TypeOf(primitive.Binary{}) +var tUndefined = reflect.TypeOf(primitive.Undefined{}) +var tOID = reflect.TypeOf(primitive.ObjectID{}) +var tDateTime = reflect.TypeOf(primitive.DateTime(0)) +var tNull = reflect.TypeOf(primitive.Null{}) +var tRegex = reflect.TypeOf(primitive.Regex{}) +var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{}) +var tDBPointer = reflect.TypeOf(primitive.DBPointer{}) +var tJavaScript = reflect.TypeOf(primitive.JavaScript("")) +var tSymbol = reflect.TypeOf(primitive.Symbol("")) +var tTimestamp = reflect.TypeOf(primitive.Timestamp{}) +var tDecimal = reflect.TypeOf(primitive.Decimal128{}) +var tMinKey = reflect.TypeOf(primitive.MinKey{}) +var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) +var tD = reflect.TypeOf(primitive.D{}) +var tA = reflect.TypeOf(primitive.A{}) +var tE = reflect.TypeOf(primitive.E{}) + +var tCoreDocument = reflect.TypeOf(bsoncore.Document{}) +var tCoreArray = reflect.TypeOf(bsoncore.Array{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go new file mode 100644 index 00000000000..39b07135b18 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ -0,0 +1,202 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "math" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// UIntCodec is the Codec used for uint values. +// +// Deprecated: UIntCodec will not be directly configurable in Go Driver 2.0. To +// configure the uint encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the uint encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. 
+// +// For example, to configure a mongo.Client to marshal Go uint values as the +// minimum BSON int size that can represent the value, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// IntMinSize: true, +// }) +// +// See the deprecation notice for each field in UIntCodec for the corresponding +// settings. +type UIntCodec struct { + // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the + // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value. + // + // Deprecated: Use bson.Encoder.IntMinSize or options.BSONOptions.IntMinSize instead. + EncodeToMinSize bool +} + +var ( + defaultUIntCodec = NewUIntCodec() + + // Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used + // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. + _ typeDecoder = defaultUIntCodec +) + +// NewUIntCodec returns a UIntCodec with options opts. +// +// Deprecated: NewUIntCodec will not be available in Go Driver 2.0. See +// [UIntCodec] for more details. +func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { + uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) + + codec := UIntCodec{} + if uintOpt.EncodeToMinSize != nil { + codec.EncodeToMinSize = *uintOpt.EncodeToMinSize + } + return &codec +} + +// EncodeValue is the ValueEncoder for uint types. +func (uic *UIntCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Uint8, reflect.Uint16: + return vw.WriteInt32(int32(val.Uint())) + case reflect.Uint, reflect.Uint32, reflect.Uint64: + u64 := val.Uint() + + // If ec.MinSize or if encodeToMinSize is true for a non-uint64 value we should write val as an int32 + useMinSize := ec.MinSize || (uic.EncodeToMinSize && val.Kind() != reflect.Uint64) + + if u64 <= math.MaxInt32 && useMinSize { + return vw.WriteInt32(int32(u64)) + } + if u64 > math.MaxInt64 { + return fmt.Errorf("%d overflows int64", u64) + } + return vw.WriteInt64(int64(u64)) + } + + return ValueEncoderError{ + Name: "UintEncodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } +} + +func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + var i64 int64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return emptyValue, err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return emptyValue, err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return emptyValue, errCannotTruncate + } + if f64 > float64(math.MaxInt64) { + return emptyValue, fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return emptyValue, err + } + if b { + i64 = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) + } + + switch t.Kind() { + case reflect.Uint8: + if i64 < 0 || i64 > math.MaxUint8 { + return emptyValue, fmt.Errorf("%d overflows uint8", i64) + } 
+ + return reflect.ValueOf(uint8(i64)), nil + case reflect.Uint16: + if i64 < 0 || i64 > math.MaxUint16 { + return emptyValue, fmt.Errorf("%d overflows uint16", i64) + } + + return reflect.ValueOf(uint16(i64)), nil + case reflect.Uint32: + if i64 < 0 || i64 > math.MaxUint32 { + return emptyValue, fmt.Errorf("%d overflows uint32", i64) + } + + return reflect.ValueOf(uint32(i64)), nil + case reflect.Uint64: + if i64 < 0 { + return emptyValue, fmt.Errorf("%d overflows uint64", i64) + } + + return reflect.ValueOf(uint64(i64)), nil + case reflect.Uint: + if i64 < 0 { + return emptyValue, fmt.Errorf("%d overflows uint", i64) + } + v := uint64(i64) + if v > math.MaxUint { // Can we fit this inside of an uint + return emptyValue, fmt.Errorf("%d overflows uint", i64) + } + + return reflect.ValueOf(uint(v)), nil + default: + return emptyValue, ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: reflect.Zero(t), + } + } +} + +// DecodeValue is the ValueDecoder for uint types. +func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + elem, err := uic.decodeType(dc, vr, val.Type()) + if err != nil { + return err + } + + val.SetUint(elem.Uint()) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go new file mode 100644 index 00000000000..996bd17127a --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go @@ -0,0 +1,49 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type ByteSliceCodecOptions struct { + EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. +} + +// ByteSliceCodec creates a new *ByteSliceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func ByteSliceCodec() *ByteSliceCodecOptions { + return &ByteSliceCodecOptions{} +} + +// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. +func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { + bs.EncodeNilAsEmpty = &b + return bs +} + +// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
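Aside, for illustration only (Go, not part of the vendored files), returning to the UIntCodec defined earlier in this diff: a minimal sketch of what EncodeToMinSize changes at encode time. It assumes the deprecated-but-present bson.NewRegistryBuilder and bson.MarshalWithRegistry helpers from this same driver version.

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonoptions"
)

func main() {
	// With EncodeToMinSize, a uint that fits in 32 bits is written as BSON
	// int32 instead of the default int64.
	codec := bsoncodec.NewUIntCodec(bsonoptions.UIntCodec().SetEncodeToMinSize(true))
	reg := bson.NewRegistryBuilder().
		RegisterTypeEncoder(reflect.TypeOf(uint(0)), codec).
		Build()

	raw, err := bson.MarshalWithRegistry(reg, bson.D{{Key: "n", Value: uint(42)}})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(raw).Lookup("n").Type) // 32-bit integer
}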
+func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions {
+	bs := ByteSliceCodec()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.EncodeNilAsEmpty != nil {
+			bs.EncodeNilAsEmpty = opt.EncodeNilAsEmpty
+		}
+	}
+
+	return bs
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go
new file mode 100644
index 00000000000..c40973c8d43
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go
@@ -0,0 +1,8 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsonoptions defines the optional configurations for the BSON codecs.
+package bsonoptions
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go
new file mode 100644
index 00000000000..f522c7e03fe
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type EmptyInterfaceCodecOptions struct {
+	DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binaries should default to []byte instead of primitive.Binary. Defaults to false.
+}
+
+// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions {
+	return &EmptyInterfaceCodecOptions{}
+}
+
+// SetDecodeBinaryAsSlice specifies if Old and Generic type binaries should default to []byte instead of primitive.Binary. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead.
+func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions {
+	e.DecodeBinaryAsSlice = &b
+	return e
+}
+
+// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
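Aside, for illustration only (Go, not part of the vendored files): every Merge* helper in this package, including the one defined next, follows the same last-one-wins rule, skipping nil entries. A minimal sketch:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/bsonoptions"
)

func main() {
	a := bsonoptions.EmptyInterfaceCodec().SetDecodeBinaryAsSlice(true)
	b := bsonoptions.EmptyInterfaceCodec().SetDecodeBinaryAsSlice(false)

	// nil entries are skipped; the last non-nil setting wins.
	merged := bsonoptions.MergeEmptyInterfaceCodecOptions(a, nil, b)
	fmt.Println(*merged.DecodeBinaryAsSlice) // false
}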
+func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions { + e := EmptyInterfaceCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.DecodeBinaryAsSlice != nil { + e.DecodeBinaryAsSlice = opt.DecodeBinaryAsSlice + } + } + + return e +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go new file mode 100644 index 00000000000..a7a7c1d9804 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go @@ -0,0 +1,82 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// MapCodecOptions represents all possible options for map encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type MapCodecOptions struct { + DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. + EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. + // Specifies how keys should be handled. If false, the behavior matches encoding/json, where the encoding key type must + // either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key type must either be a + // string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with fmt.Sprint() and the + // encoding key type must be a string, an integer type, or a float. If true, the use of Stringer will override + // TextMarshaler/TextUnmarshaler. Defaults to false. + EncodeKeysWithStringer *bool +} + +// MapCodec creates a new *MapCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func MapCodec() *MapCodecOptions { + return &MapCodecOptions{} +} + +// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. +func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { + t.DecodeZerosMap = &b + return t +} + +// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. +func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { + t.EncodeNilAsEmpty = &b + return t +} + +// SetEncodeKeysWithStringer specifies how keys should be handled. If false, the behavior matches encoding/json, where the +// encoding key type must either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key +// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with +// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer +// will override TextMarshaler/TextUnmarshaler. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. 
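Aside, for illustration only (Go, not part of the vendored files): a sketch making the EncodeKeysWithStringer trade-off described above concrete. It assumes the deprecated bsoncodec.NewMapCodec constructor and registry helpers from this driver version; the Level type is hypothetical.

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonoptions"
)

type Level int

func (l Level) String() string { return fmt.Sprintf("level-%d", int(l)) }

func main() {
	codec := bsoncodec.NewMapCodec(bsonoptions.MapCodec().SetEncodeKeysWithStringer(true))
	reg := bson.NewRegistryBuilder().
		RegisterDefaultEncoder(reflect.Map, codec).
		Build()

	raw, err := bson.MarshalWithRegistry(reg, map[Level]int32{2: 10})
	if err != nil {
		panic(err)
	}
	// The key is rendered via fmt.Sprint, i.e. Level.String: {"level-2": 10}.
	// With the default (false), the integer kind is used and the same map
	// would encode as {"2": 10}.
	fmt.Println(bson.Raw(raw))
}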
+func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { + t.EncodeKeysWithStringer = &b + return t +} + +// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. +func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { + s := MapCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.DecodeZerosMap != nil { + s.DecodeZerosMap = opt.DecodeZerosMap + } + if opt.EncodeNilAsEmpty != nil { + s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty + } + if opt.EncodeKeysWithStringer != nil { + s.EncodeKeysWithStringer = opt.EncodeKeysWithStringer + } + } + + return s +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go new file mode 100644 index 00000000000..3c1e4f35ba1 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go @@ -0,0 +1,49 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// SliceCodecOptions represents all possible options for slice encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type SliceCodecOptions struct { + EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. +} + +// SliceCodec creates a new *SliceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func SliceCodec() *SliceCodecOptions { + return &SliceCodecOptions{} +} + +// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. +func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions { + s.EncodeNilAsEmpty = &b + return s +} + +// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. +func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions { + s := SliceCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.EncodeNilAsEmpty != nil { + s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty + } + } + + return s +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go new file mode 100644 index 00000000000..f8b76f996e4 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go @@ -0,0 +1,52 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+var defaultDecodeOIDAsHex = true
+
+// StringCodecOptions represents all possible options for string encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
type StringCodecOptions struct {
+	DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true.
+}
+
+// StringCodec creates a new *StringCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func StringCodec() *StringCodecOptions {
+	return &StringCodecOptions{}
+}
+
+// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made
+// from the raw object ID bytes will be used. Defaults to true.
+//
+// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0.
+func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions {
+	t.DecodeObjectIDAsHex = &b
+	return t
+}
+
+// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions {
+	s := &StringCodecOptions{&defaultDecodeOIDAsHex}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.DecodeObjectIDAsHex != nil {
+			s.DecodeObjectIDAsHex = opt.DecodeObjectIDAsHex
+		}
+	}
+
+	return s
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go
new file mode 100644
index 00000000000..1cbfa32e8b4
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go
@@ -0,0 +1,107 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+var defaultOverwriteDuplicatedInlinedFields = true
+
+// StructCodecOptions represents all possible options for struct encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type StructCodecOptions struct {
+	DecodeZeroStruct                 *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false.
+	DecodeDeepZeroInline             *bool // Specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false.
+	EncodeOmitDefaultStruct          *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false.
+	AllowUnexportedFields            *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
+	OverwriteDuplicatedInlinedFields *bool // Specifies if fields in inlined structs can be overwritten by higher level struct fields with the same key. Defaults to true.
+}
+
+// StructCodec creates a new *StructCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func StructCodec() *StructCodecOptions {
+	return &StructCodecOptions{}
+}
+
+// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
+func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions {
+	t.DecodeZeroStruct = &b
+	return t
+}
+
+// SetDecodeDeepZeroInline specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false.
+//
+// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0.
+func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions {
+	t.DecodeDeepZeroInline = &b
+	return t
+}
+
+// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all
+// its values set to their default value. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead.
+func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions {
+	t.EncodeOmitDefaultStruct = &b
+	return t
+}
+
+// SetOverwriteDuplicatedInlinedFields specifies if inlined struct fields can be overwritten by higher level struct fields with the
+// same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when
+// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if
+// there are duplicate keys after the struct is inlined. Defaults to true.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead.
+func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions {
+	t.OverwriteDuplicatedInlinedFields = &b
+	return t
+}
+
+// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
+//
+// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be
+// supported in Go Driver 2.0.
+func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions {
+	t.AllowUnexportedFields = &b
+	return t
+}
+
+// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
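Aside, for illustration only (Go, not part of the vendored files): a sketch of the OverwriteDuplicatedInlinedFields behavior described above. The Inner and Outer types are hypothetical, and the registry wiring assumes the deprecated bsoncodec.NewStructCodec constructor from this driver version.

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonoptions"
)

type Inner struct {
	Name string `bson:"name"`
}

type Outer struct {
	Inner `bson:",inline"`
	Name  string `bson:"name"` // duplicates the inlined "name" key
}

func main() {
	// Default (true): the top-level field silently wins.
	raw, _ := bson.Marshal(Outer{Inner{"inner"}, "outer"})
	fmt.Println(bson.Raw(raw)) // {"name": "outer"}

	// false: encoding the same value reports the duplicated key instead.
	sc, _ := bsoncodec.NewStructCodec(bsoncodec.DefaultStructTagParser,
		bsonoptions.StructCodec().SetOverwriteDuplicatedInlinedFields(false))
	reg := bson.NewRegistryBuilder().RegisterDefaultEncoder(reflect.Struct, sc).Build()
	_, err := bson.MarshalWithRegistry(reg, Outer{Inner{"inner"}, "outer"})
	fmt.Println(err) // error mentioning the duplicated key "name"
}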
+func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { + s := &StructCodecOptions{ + OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields, + } + for _, opt := range opts { + if opt == nil { + continue + } + + if opt.DecodeZeroStruct != nil { + s.DecodeZeroStruct = opt.DecodeZeroStruct + } + if opt.DecodeDeepZeroInline != nil { + s.DecodeDeepZeroInline = opt.DecodeDeepZeroInline + } + if opt.EncodeOmitDefaultStruct != nil { + s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct + } + if opt.OverwriteDuplicatedInlinedFields != nil { + s.OverwriteDuplicatedInlinedFields = opt.OverwriteDuplicatedInlinedFields + } + if opt.AllowUnexportedFields != nil { + s.AllowUnexportedFields = opt.AllowUnexportedFields + } + } + + return s +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go new file mode 100644 index 00000000000..3f38433d226 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go @@ -0,0 +1,49 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// TimeCodecOptions represents all possible options for time.Time encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type TimeCodecOptions struct { + UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false. +} + +// TimeCodec creates a new *TimeCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func TimeCodec() *TimeCodecOptions { + return &TimeCodecOptions{} +} + +// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. +func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions { + t.UseLocalTimeZone = &b + return t +} + +// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. +func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions { + t := TimeCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.UseLocalTimeZone != nil { + t.UseLocalTimeZone = opt.UseLocalTimeZone + } + } + + return t +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go new file mode 100644 index 00000000000..5091e4d9633 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go @@ -0,0 +1,49 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// UIntCodecOptions represents all possible options for uint encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type UIntCodecOptions struct {
+	EncodeToMinSize *bool // Specifies if all uints except uint64 should be encoded to the minimum size bsontype. Defaults to false.
+}
+
+// UIntCodec creates a new *UIntCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func UIntCodec() *UIntCodecOptions {
+	return &UIntCodecOptions{}
+}
+
+// SetEncodeToMinSize specifies if all uints except uint64 should be encoded to the minimum size bsontype. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead.
+func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions {
+	u.EncodeToMinSize = &b
+	return u
+}
+
+// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions {
+	u := UIntCodec()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.EncodeToMinSize != nil {
+			u.EncodeToMinSize = opt.EncodeToMinSize
+		}
+	}
+
+	return u
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go
new file mode 100644
index 00000000000..1e25570b855
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go
@@ -0,0 +1,489 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// Copier is a type that allows copying between ValueReaders, ValueWriters, and
+// []byte values.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+type Copier struct{}
+
+// NewCopier creates a new Copier.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func NewCopier() Copier {
+	return Copier{}
+}
+
+// CopyDocument handles copying a document from src to dst.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func CopyDocument(dst ValueWriter, src ValueReader) error {
+	return Copier{}.CopyDocument(dst, src)
+}
+
+// CopyDocument handles copying one document from the src to the dst.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
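Aside, for illustration only (Go, not part of the vendored files): a round-trip sketch of the document-copying machinery defined below, assuming this package's NewBSONDocumentReader and NewBSONValueWriter constructors from the same driver version.

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	raw, err := bson.Marshal(bson.D{{Key: "x", Value: int32(1)}})
	if err != nil {
		panic(err)
	}

	buf := new(bytes.Buffer)
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		panic(err)
	}

	// Stream the document from a byte-backed reader into the writer.
	if err := bsonrw.CopyDocument(vw, bsonrw.NewBSONDocumentReader(raw)); err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(raw, buf.Bytes())) // true
}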
+func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { + dr, err := src.ReadDocument() + if err != nil { + return err + } + + dw, err := dst.WriteDocument() + if err != nil { + return err + } + + return c.copyDocumentCore(dw, dr) +} + +// CopyArrayFromBytes copies the values from a BSON array represented as a +// []byte to a ValueWriter. +// +// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { + aw, err := dst.WriteArray() + if err != nil { + return err + } + + err = c.CopyBytesToArrayWriter(aw, src) + if err != nil { + return err + } + + return aw.WriteArrayEnd() +} + +// CopyDocumentFromBytes copies the values from a BSON document represented as a +// []byte to a ValueWriter. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { + dw, err := dst.WriteDocument() + if err != nil { + return err + } + + err = c.CopyBytesToDocumentWriter(dw, src) + if err != nil { + return err + } + + return dw.WriteDocumentEnd() +} + +type writeElementFn func(key string) (ValueWriter, error) + +// CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an +// ArrayWriter. +// +// Deprecated: Copying BSON arrays using the ArrayWriter interface will not be supported in Go +// Driver 2.0. +func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { + wef := func(_ string) (ValueWriter, error) { + return dst.WriteArrayElement() + } + + return c.copyBytesToValueWriter(src, wef) +} + +// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a +// DocumentWriter. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { + wef := func(key string) (ValueWriter, error) { + return dst.WriteDocumentElement(key) + } + + return c.copyBytesToValueWriter(src, wef) +} + +func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { + // TODO(skriptble): Create errors types here. Anything that is a tag should be a property. + length, rem, ok := bsoncore.ReadLength(src) + if !ok { + return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src)) + } + if len(src) < int(length) { + return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", len(src), length) + } + rem = rem[:length-4] + + var t bsontype.Type + var key string + var val bsoncore.Value + for { + t, rem, ok = bsoncore.ReadType(rem) + if !ok { + return io.EOF + } + if t == bsontype.Type(0) { + if len(rem) != 0 { + return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem) + } + break + } + + key, rem, ok = bsoncore.ReadKey(rem) + if !ok { + return fmt.Errorf("invalid key found. remaining bytes=%v", rem) + } + + // write as either array element or document element using writeElementFn + vw, err := wef(key) + if err != nil { + return err + } + + val, rem, ok = bsoncore.ReadValue(rem, t) + if !ok { + return fmt.Errorf("not enough bytes available to read type. 
bytes=%d type=%s", len(rem), t)
+		}
+		err = c.CopyValueFromBytes(vw, t, val.Data)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CopyDocumentToBytes copies an entire document from the ValueReader and
+// returns it as bytes.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) {
+	return c.AppendDocumentBytes(nil, src)
+}
+
+// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will
+// append the result to dst.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) {
+	if br, ok := src.(BytesReader); ok {
+		_, dst, err := br.ReadValueBytes(dst)
+		return dst, err
+	}
+
+	vw := vwPool.Get().(*valueWriter)
+	defer putValueWriter(vw)
+
+	vw.reset(dst)
+
+	err := c.CopyDocument(vw, src)
+	dst = vw.buf
+	return dst, err
+}
+
+// AppendArrayBytes copies an array from the ValueReader to dst.
+//
+// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) {
+	if br, ok := src.(BytesReader); ok {
+		_, dst, err := br.ReadValueBytes(dst)
+		return dst, err
+	}
+
+	vw := vwPool.Get().(*valueWriter)
+	defer putValueWriter(vw)
+
+	vw.reset(dst)
+
+	err := c.copyArray(vw, src)
+	dst = vw.buf
+	return dst, err
+}
+
+// CopyValueFromBytes will write the value represented by t and src to dst.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.UnmarshalValue] instead.
+func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error {
+	if wvb, ok := dst.(BytesWriter); ok {
+		return wvb.WriteValueBytes(t, src)
+	}
+
+	vr := vrPool.Get().(*valueReader)
+	defer vrPool.Put(vr)
+
+	vr.reset(src)
+	vr.pushElement(t)
+
+	return c.CopyValue(dst, vr)
+}
+
+// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a
+// []byte.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.MarshalValue] instead.
+func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) {
+	return c.AppendValueBytes(nil, src)
+}
+
+// AppendValueBytes functions the same as CopyValueToBytes, but will append the
+// result to dst.
+//
+// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go
+// Driver 2.0.
+func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) {
+	if br, ok := src.(BytesReader); ok {
+		return br.ReadValueBytes(dst)
+	}
+
+	vw := vwPool.Get().(*valueWriter)
+	defer putValueWriter(vw)
+
+	start := len(dst)
+
+	vw.reset(dst)
+	vw.push(mElement)
+
+	err := c.CopyValue(vw, src)
+	if err != nil {
+		return 0, dst, err
+	}
+
+	return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil
+}
+
+// CopyValue will copy a single value from src to dst.
+//
+// Deprecated: Copying BSON values using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
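Aside, for illustration only (Go, not part of the vendored files): the deprecation notices above point at bson.MarshalValue and bson.UnmarshalValue as the supported replacements for CopyValueToBytes and CopyValueFromBytes. A minimal sketch:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// Like Copier.CopyValueToBytes: yields the BSON type plus the raw bytes.
	t, data, err := bson.MarshalValue("hello")
	if err != nil {
		panic(err)
	}
	fmt.Println(t) // string

	var s string
	if err := bson.UnmarshalValue(t, data, &s); err != nil {
		panic(err)
	}
	fmt.Println(s) // hello
}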
+func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { + var err error + switch src.Type() { + case bsontype.Double: + var f64 float64 + f64, err = src.ReadDouble() + if err != nil { + break + } + err = dst.WriteDouble(f64) + case bsontype.String: + var str string + str, err = src.ReadString() + if err != nil { + return err + } + err = dst.WriteString(str) + case bsontype.EmbeddedDocument: + err = c.CopyDocument(dst, src) + case bsontype.Array: + err = c.copyArray(dst, src) + case bsontype.Binary: + var data []byte + var subtype byte + data, subtype, err = src.ReadBinary() + if err != nil { + break + } + err = dst.WriteBinaryWithSubtype(data, subtype) + case bsontype.Undefined: + err = src.ReadUndefined() + if err != nil { + break + } + err = dst.WriteUndefined() + case bsontype.ObjectID: + var oid primitive.ObjectID + oid, err = src.ReadObjectID() + if err != nil { + break + } + err = dst.WriteObjectID(oid) + case bsontype.Boolean: + var b bool + b, err = src.ReadBoolean() + if err != nil { + break + } + err = dst.WriteBoolean(b) + case bsontype.DateTime: + var dt int64 + dt, err = src.ReadDateTime() + if err != nil { + break + } + err = dst.WriteDateTime(dt) + case bsontype.Null: + err = src.ReadNull() + if err != nil { + break + } + err = dst.WriteNull() + case bsontype.Regex: + var pattern, options string + pattern, options, err = src.ReadRegex() + if err != nil { + break + } + err = dst.WriteRegex(pattern, options) + case bsontype.DBPointer: + var ns string + var pointer primitive.ObjectID + ns, pointer, err = src.ReadDBPointer() + if err != nil { + break + } + err = dst.WriteDBPointer(ns, pointer) + case bsontype.JavaScript: + var js string + js, err = src.ReadJavascript() + if err != nil { + break + } + err = dst.WriteJavascript(js) + case bsontype.Symbol: + var symbol string + symbol, err = src.ReadSymbol() + if err != nil { + break + } + err = dst.WriteSymbol(symbol) + case bsontype.CodeWithScope: + var code string + var srcScope DocumentReader + code, srcScope, err = src.ReadCodeWithScope() + if err != nil { + break + } + + var dstScope DocumentWriter + dstScope, err = dst.WriteCodeWithScope(code) + if err != nil { + break + } + err = c.copyDocumentCore(dstScope, srcScope) + case bsontype.Int32: + var i32 int32 + i32, err = src.ReadInt32() + if err != nil { + break + } + err = dst.WriteInt32(i32) + case bsontype.Timestamp: + var t, i uint32 + t, i, err = src.ReadTimestamp() + if err != nil { + break + } + err = dst.WriteTimestamp(t, i) + case bsontype.Int64: + var i64 int64 + i64, err = src.ReadInt64() + if err != nil { + break + } + err = dst.WriteInt64(i64) + case bsontype.Decimal128: + var d128 primitive.Decimal128 + d128, err = src.ReadDecimal128() + if err != nil { + break + } + err = dst.WriteDecimal128(d128) + case bsontype.MinKey: + err = src.ReadMinKey() + if err != nil { + break + } + err = dst.WriteMinKey() + case bsontype.MaxKey: + err = src.ReadMaxKey() + if err != nil { + break + } + err = dst.WriteMaxKey() + default: + err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type()) + } + + return err +} + +func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { + ar, err := src.ReadArray() + if err != nil { + return err + } + + aw, err := dst.WriteArray() + if err != nil { + return err + } + + for { + vr, err := ar.ReadValue() + if errors.Is(err, ErrEOA) { + break + } + if err != nil { + return err + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + err = c.CopyValue(vw, vr) + if err != nil { + return err + } 
+	}
+
+	return aw.WriteArrayEnd()
+}
+
+func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error {
+	for {
+		key, vr, err := dr.ReadElement()
+		if errors.Is(err, ErrEOD) {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		vw, err := dw.WriteDocumentElement(key)
+		if err != nil {
+			return err
+		}
+
+		err = c.CopyValue(vw, vr)
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go
new file mode 100644
index 00000000000..750b0d2af51
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go
@@ -0,0 +1,9 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsonrw contains abstractions for reading and writing
+// BSON and BSON like types from sources.
+package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw"
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
new file mode 100644
index 00000000000..f0702d9d302
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
@@ -0,0 +1,806 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"encoding/base64"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+const maxNestingDepth = 200
+
+// ErrInvalidJSON indicates the JSON input is invalid
+var ErrInvalidJSON = errors.New("invalid JSON input")
+
+type jsonParseState byte
+
+const (
+	jpsStartState jsonParseState = iota
+	jpsSawBeginObject
+	jpsSawEndObject
+	jpsSawBeginArray
+	jpsSawEndArray
+	jpsSawColon
+	jpsSawComma
+	jpsSawKey
+	jpsSawValue
+	jpsDoneState
+	jpsInvalidState
+)
+
+type jsonParseMode byte
+
+const (
+	jpmInvalidMode jsonParseMode = iota
+	jpmObjectMode
+	jpmArrayMode
+)
+
+type extJSONValue struct {
+	t bsontype.Type
+	v interface{}
+}
+
+type extJSONObject struct {
+	keys   []string
+	values []*extJSONValue
+}
+
+type extJSONParser struct {
+	js *jsonScanner
+	s  jsonParseState
+	m  []jsonParseMode
+	k  string
+	v  *extJSONValue
+
+	err       error
+	canonical bool
+	depth     int
+	maxDepth  int
+
+	emptyObject bool
+	relaxedUUID bool
+}
+
+// newExtJSONParser returns a new extended JSON parser, ready to begin
+// parsing from the first character of the given JSON input. It will not
+// perform any read-ahead and will therefore not report any errors about
+// malformed JSON at this point.
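Aside, for illustration only (Go, not part of the vendored files): the parser defined below is the engine behind the driver's extended JSON support, normally reached from user code via bson.UnmarshalExtJSON. A minimal sketch:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// canonical=true selects the canonical parsing rules implemented below.
	ext := []byte(`{"n": {"$numberInt": "42"}, "when": {"$date": {"$numberLong": "0"}}}`)

	var doc bson.D
	if err := bson.UnmarshalExtJSON(ext, true, &doc); err != nil {
		panic(err)
	}
	// doc now holds int32(42) and primitive.DateTime(0).
	fmt.Println(doc)
}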
+func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser { + return &extJSONParser{ + js: &jsonScanner{r: r}, + s: jpsStartState, + m: []jsonParseMode{}, + canonical: canonical, + maxDepth: maxNestingDepth, + } +} + +// peekType examines the next value and returns its BSON Type +func (ejp *extJSONParser) peekType() (bsontype.Type, error) { + var t bsontype.Type + var err error + initialState := ejp.s + + ejp.advanceState() + switch ejp.s { + case jpsSawValue: + t = ejp.v.t + case jpsSawBeginArray: + t = bsontype.Array + case jpsInvalidState: + err = ejp.err + case jpsSawComma: + // in array mode, seeing a comma means we need to progress again to actually observe a type + if ejp.peekMode() == jpmArrayMode { + return ejp.peekType() + } + case jpsSawEndArray: + // this would only be a valid state if we were in array mode, so return end-of-array error + err = ErrEOA + case jpsSawBeginObject: + // peek key to determine type + ejp.advanceState() + switch ejp.s { + case jpsSawEndObject: // empty embedded document + t = bsontype.EmbeddedDocument + ejp.emptyObject = true + case jpsInvalidState: + err = ejp.err + case jpsSawKey: + if initialState == jpsStartState { + return bsontype.EmbeddedDocument, nil + } + t = wrapperKeyBSONType(ejp.k) + + // if $uuid is encountered, parse as binary subtype 4 + if ejp.k == "$uuid" { + ejp.relaxedUUID = true + t = bsontype.Binary + } + + switch t { + case bsontype.JavaScript: + // just saw $code, need to check for $scope at same level + _, err = ejp.readValue(bsontype.JavaScript) + if err != nil { + break + } + + switch ejp.s { + case jpsSawEndObject: // type is TypeJavaScript + case jpsSawComma: + ejp.advanceState() + + if ejp.s == jpsSawKey && ejp.k == "$scope" { + t = bsontype.CodeWithScope + } else { + err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k) + } + case jpsInvalidState: + err = ejp.err + default: + err = ErrInvalidJSON + } + case bsontype.CodeWithScope: + err = errors.New("invalid extended JSON: code with $scope must contain $code before $scope") + } + } + } + + return t, err +} + +// readKey parses the next key and its type and returns them +func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) { + if ejp.emptyObject { + ejp.emptyObject = false + return "", 0, ErrEOD + } + + // advance to key (or return with error) + switch ejp.s { + case jpsStartState: + ejp.advanceState() + if ejp.s == jpsSawBeginObject { + ejp.advanceState() + } + case jpsSawBeginObject: + ejp.advanceState() + case jpsSawValue, jpsSawEndObject, jpsSawEndArray: + ejp.advanceState() + switch ejp.s { + case jpsSawBeginObject, jpsSawComma: + ejp.advanceState() + case jpsSawEndObject: + return "", 0, ErrEOD + case jpsDoneState: + return "", 0, io.EOF + case jpsInvalidState: + return "", 0, ejp.err + default: + return "", 0, ErrInvalidJSON + } + case jpsSawKey: // do nothing (key was peeked before) + default: + return "", 0, invalidRequestError("key") + } + + // read key + var key string + + switch ejp.s { + case jpsSawKey: + key = ejp.k + case jpsSawEndObject: + return "", 0, ErrEOD + case jpsInvalidState: + return "", 0, ejp.err + default: + return "", 0, invalidRequestError("key") + } + + // check for colon + ejp.advanceState() + if err := ensureColon(ejp.s, key); err != nil { + return "", 0, err + } + + // peek at the value to determine type + t, err := ejp.peekType() + if err != nil { + return "", 0, err + } + + return key, t, nil +} + +// readValue returns the value corresponding to the Type returned by peekType 
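Aside, for illustration only (Go, not part of the vendored files): a sketch of the "$code before $scope" ordering rule that peekType enforces above, exercised through the standard bson.UnmarshalExtJSON entry point. The struct and JSON payloads are hypothetical.

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	var v struct {
		F primitive.CodeWithScope `bson:"f"`
	}

	ok := []byte(`{"f": {"$code": "function(){ return x; }", "$scope": {"x": 1}}}`)
	if err := bson.UnmarshalExtJSON(ok, true, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.F.Code)

	// Reversing the keys trips the rule enforced in peekType.
	bad := []byte(`{"f": {"$scope": {"x": 1}, "$code": "function(){}"}}`)
	fmt.Println(bson.UnmarshalExtJSON(bad, true, &v) != nil) // true
}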
+func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { + if ejp.s == jpsInvalidState { + return nil, ejp.err + } + + var v *extJSONValue + + switch t { + case bsontype.Null, bsontype.Boolean, bsontype.String: + if ejp.s != jpsSawValue { + return nil, invalidRequestError(t.String()) + } + v = ejp.v + case bsontype.Int32, bsontype.Int64, bsontype.Double: + // relaxed version allows these to be literal number values + if ejp.s == jpsSawValue { + v = ejp.v + break + } + fallthrough + case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined: + switch ejp.s { + case jpsSawKey: + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + // read value + ejp.advanceState() + if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) { + return nil, invalidJSONErrorForType("value", t) + } + + v = ejp.v + + // read end object + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("} after value", t) + } + default: + return nil, invalidRequestError(t.String()) + } + case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer: + if ejp.s != jpsSawKey { + return nil, invalidRequestError(t.String()) + } + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + ejp.advanceState() + if t == bsontype.Binary && ejp.s == jpsSawValue { + // convert relaxed $uuid format + if ejp.relaxedUUID { + defer func() { ejp.relaxedUUID = false }() + uuid, err := ejp.v.parseSymbol() + if err != nil { + return nil, err + } + + // RFC 4122 defines the length of a UUID as 36 and the hyphens in a UUID as appearing + // in the 8th, 13th, 18th, and 23rd characters. 
+ // + // See https://tools.ietf.org/html/rfc4122#section-3 + valid := len(uuid) == 36 && + string(uuid[8]) == "-" && + string(uuid[13]) == "-" && + string(uuid[18]) == "-" && + string(uuid[23]) == "-" + if !valid { + return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") + } + + // remove hyphens + uuidNoHyphens := strings.ReplaceAll(uuid, "-", "") + if len(uuidNoHyphens) != 32 { + return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") + } + + // convert hex to bytes + bytes, err := hex.DecodeString(uuidNoHyphens) + if err != nil { + return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %w", err) + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("$uuid and value and then }", bsontype.Binary) + } + + base64 := &extJSONValue{ + t: bsontype.String, + v: base64.StdEncoding.EncodeToString(bytes), + } + subType := &extJSONValue{ + t: bsontype.String, + v: "04", + } + + v = &extJSONValue{ + t: bsontype.EmbeddedDocument, + v: &extJSONObject{ + keys: []string{"base64", "subType"}, + values: []*extJSONValue{base64, subType}, + }, + } + + break + } + + // convert legacy $binary format + base64 := ejp.v + + ejp.advanceState() + if ejp.s != jpsSawComma { + return nil, invalidJSONErrorForType(",", bsontype.Binary) + } + + ejp.advanceState() + key, t, err := ejp.readKey() + if err != nil { + return nil, err + } + if key != "$type" { + return nil, invalidJSONErrorForType("$type", bsontype.Binary) + } + + subType, err := ejp.readValue(t) + if err != nil { + return nil, err + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary) + } + + v = &extJSONValue{ + t: bsontype.EmbeddedDocument, + v: &extJSONObject{ + keys: []string{"base64", "subType"}, + values: []*extJSONValue{base64, subType}, + }, + } + break + } + + // read KV pairs + if ejp.s != jpsSawBeginObject { + return nil, invalidJSONErrorForType("{", t) + } + + keys, vals, err := ejp.readObject(2, true) + if err != nil { + return nil, err + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("2 key-value pairs and then }", t) + } + + v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} + + case bsontype.DateTime: + switch ejp.s { + case jpsSawValue: + v = ejp.v + case jpsSawKey: + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + ejp.advanceState() + switch ejp.s { + case jpsSawBeginObject: + keys, vals, err := ejp.readObject(1, true) + if err != nil { + return nil, err + } + v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} + case jpsSawValue: + if ejp.canonical { + return nil, invalidJSONError("{") + } + v = ejp.v + default: + if ejp.canonical { + return nil, invalidJSONErrorForType("object", t) + } + return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t) + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("value and then }", t) + } + default: + return nil, invalidRequestError(t.String()) + } + case bsontype.JavaScript: + switch ejp.s { + case jpsSawKey: + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + // read value + ejp.advanceState() + if ejp.s != 
jpsSawValue { + return nil, invalidJSONErrorForType("value", t) + } + v = ejp.v + + // read end object or comma and just return + ejp.advanceState() + case jpsSawEndObject: + v = ejp.v + default: + return nil, invalidRequestError(t.String()) + } + case bsontype.CodeWithScope: + if ejp.s == jpsSawKey && ejp.k == "$scope" { + v = ejp.v // this is the $code string from earlier + + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + // read { + ejp.advanceState() + if ejp.s != jpsSawBeginObject { + return nil, invalidJSONError("$scope to be embedded document") + } + } else { + return nil, invalidRequestError(t.String()) + } + case bsontype.EmbeddedDocument, bsontype.Array: + return nil, invalidRequestError(t.String()) + } + + return v, nil +} + +// readObject is a utility method for reading full objects of known (or expected) size +// it is useful for extended JSON types such as binary, datetime, regex, and timestamp +func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) { + keys := make([]string, numKeys) + vals := make([]*extJSONValue, numKeys) + + if !started { + ejp.advanceState() + if ejp.s != jpsSawBeginObject { + return nil, nil, invalidJSONError("{") + } + } + + for i := 0; i < numKeys; i++ { + key, t, err := ejp.readKey() + if err != nil { + return nil, nil, err + } + + switch ejp.s { + case jpsSawKey: + v, err := ejp.readValue(t) + if err != nil { + return nil, nil, err + } + + keys[i] = key + vals[i] = v + case jpsSawValue: + keys[i] = key + vals[i] = ejp.v + default: + return nil, nil, invalidJSONError("value") + } + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, nil, invalidJSONError("}") + } + + return keys, vals, nil +} + +// advanceState reads the next JSON token from the scanner and transitions +// from the current state based on that token's type +func (ejp *extJSONParser) advanceState() { + if ejp.s == jpsDoneState || ejp.s == jpsInvalidState { + return + } + + jt, err := ejp.js.nextToken() + + if err != nil { + ejp.err = err + ejp.s = jpsInvalidState + return + } + + valid := ejp.validateToken(jt.t) + if !valid { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + return + } + + switch jt.t { + case jttBeginObject: + ejp.s = jpsSawBeginObject + ejp.pushMode(jpmObjectMode) + ejp.depth++ + + if ejp.depth > ejp.maxDepth { + ejp.err = nestingDepthError(jt.p, ejp.depth) + ejp.s = jpsInvalidState + } + case jttEndObject: + ejp.s = jpsSawEndObject + ejp.depth-- + + if ejp.popMode() != jpmObjectMode { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + } + case jttBeginArray: + ejp.s = jpsSawBeginArray + ejp.pushMode(jpmArrayMode) + case jttEndArray: + ejp.s = jpsSawEndArray + + if ejp.popMode() != jpmArrayMode { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + } + case jttColon: + ejp.s = jpsSawColon + case jttComma: + ejp.s = jpsSawComma + case jttEOF: + ejp.s = jpsDoneState + if len(ejp.m) != 0 { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + } + case jttString: + switch ejp.s { + case jpsSawComma: + if ejp.peekMode() == jpmArrayMode { + ejp.s = jpsSawValue + ejp.v = extendJSONToken(jt) + return + } + fallthrough + case jpsSawBeginObject: + ejp.s = jpsSawKey + ejp.k = jt.v.(string) + return + } + fallthrough + default: + ejp.s = jpsSawValue + ejp.v = extendJSONToken(jt) + } +} + +var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{ + jpsStartState: { + jttBeginObject: 
true, + jttBeginArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + jttEOF: true, + }, + jpsSawBeginObject: { + jttEndObject: true, + jttString: true, + }, + jpsSawEndObject: { + jttEndObject: true, + jttEndArray: true, + jttComma: true, + jttEOF: true, + }, + jpsSawBeginArray: { + jttBeginObject: true, + jttBeginArray: true, + jttEndArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + }, + jpsSawEndArray: { + jttEndObject: true, + jttEndArray: true, + jttComma: true, + jttEOF: true, + }, + jpsSawColon: { + jttBeginObject: true, + jttBeginArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + }, + jpsSawComma: { + jttBeginObject: true, + jttBeginArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + }, + jpsSawKey: { + jttColon: true, + }, + jpsSawValue: { + jttEndObject: true, + jttEndArray: true, + jttComma: true, + jttEOF: true, + }, + jpsDoneState: {}, + jpsInvalidState: {}, +} + +func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool { + switch ejp.s { + case jpsSawEndObject: + // if we are at depth zero and the next token is a '{', + // we can consider it valid only if we are not in array mode. + if jtt == jttBeginObject && ejp.depth == 0 { + return ejp.peekMode() != jpmArrayMode + } + case jpsSawComma: + switch ejp.peekMode() { + // the only valid next token after a comma inside a document is a string (a key) + case jpmObjectMode: + return jtt == jttString + case jpmInvalidMode: + return false + } + } + + _, ok := jpsValidTransitionTokens[ejp.s][jtt] + return ok +} + +// ensureExtValueType returns true if the current value has the expected +// value type for single-key extended JSON types. 
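The jpsValidTransitionTokens table above drives validateToken: after the two special cases, a token is accepted only when the map holds true for the current (state, token) pair, and missing entries read as false. The same table-driven idea in miniature, with made-up state and token names:

package main

import "fmt"

type parseState int
type tokenType int

const (
	stateSawKey parseState = iota
	stateSawValue
)
const (
	tokColon tokenType = iota
	tokComma
	tokString
)

// validNext is a toy transition table: absent entries are false,
// so illegal (state, token) pairs are rejected by default.
var validNext = map[parseState]map[tokenType]bool{
	stateSawKey:   {tokColon: true},
	stateSawValue: {tokComma: true},
}

func main() {
	fmt.Println(validNext[stateSawKey][tokColon])  // true: a key must be followed by ':'
	fmt.Println(validNext[stateSawKey][tokString]) // false: two keys in a row are invalid
}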
For example, +// {"$numberInt": v} v must be TypeString +func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool { + switch t { + case bsontype.MinKey, bsontype.MaxKey: + return ejp.v.t == bsontype.Int32 + case bsontype.Undefined: + return ejp.v.t == bsontype.Boolean + case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID: + return ejp.v.t == bsontype.String + default: + return false + } +} + +func (ejp *extJSONParser) pushMode(m jsonParseMode) { + ejp.m = append(ejp.m, m) +} + +func (ejp *extJSONParser) popMode() jsonParseMode { + l := len(ejp.m) + if l == 0 { + return jpmInvalidMode + } + + m := ejp.m[l-1] + ejp.m = ejp.m[:l-1] + + return m +} + +func (ejp *extJSONParser) peekMode() jsonParseMode { + l := len(ejp.m) + if l == 0 { + return jpmInvalidMode + } + + return ejp.m[l-1] +} + +func extendJSONToken(jt *jsonToken) *extJSONValue { + var t bsontype.Type + + switch jt.t { + case jttInt32: + t = bsontype.Int32 + case jttInt64: + t = bsontype.Int64 + case jttDouble: + t = bsontype.Double + case jttString: + t = bsontype.String + case jttBool: + t = bsontype.Boolean + case jttNull: + t = bsontype.Null + default: + return nil + } + + return &extJSONValue{t: t, v: jt.v} +} + +func ensureColon(s jsonParseState, key string) error { + if s != jpsSawColon { + return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key) + } + + return nil +} + +func invalidRequestError(s string) error { + return fmt.Errorf("invalid request to read %s", s) +} + +func invalidJSONError(expected string) error { + return fmt.Errorf("invalid JSON input; expected %s", expected) +} + +func invalidJSONErrorForType(expected string, t bsontype.Type) error { + return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t) +} + +func unexpectedTokenError(jt *jsonToken) error { + switch jt.t { + case jttInt32, jttInt64, jttDouble: + return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p) + case jttString: + return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p) + case jttBool: + return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p) + case jttNull: + return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p) + case jttEOF: + return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p) + default: + return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p) + } +} + +func nestingDepthError(p, depth int) error { + return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go new file mode 100644 index 00000000000..59ddfc44858 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go @@ -0,0 +1,653 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrw + +import ( + "errors" + "fmt" + "io" + "sync" + + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON. 
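Since the pool type is deprecated, a minimal usage sketch of the non-deprecated entry point is more instructive. This assumes only the exported API visible in this file (NewExtJSONValueReader, ReadDocument, ReadElement, ReadString, ErrEOD); ReadString works here because the sample value is a plain string:

package main

import (
	"errors"
	"fmt"
	"strings"

	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	// Walk one relaxed extended-JSON document (canonical=false).
	vr, err := bsonrw.NewExtJSONValueReader(strings.NewReader(`{"name": "buildah"}`), false)
	if err != nil {
		panic(err)
	}
	dr, err := vr.ReadDocument()
	if err != nil {
		panic(err)
	}
	for {
		key, evr, err := dr.ReadElement()
		if errors.Is(err, bsonrw.ErrEOD) {
			break // end of document
		}
		if err != nil {
			panic(err)
		}
		val, err := evr.ReadString()
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s = %s\n", key, val)
	}
}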
+// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. +type ExtJSONValueReaderPool struct { + pool sync.Pool +} + +// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. +func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { + return &ExtJSONValueReaderPool{ + pool: sync.Pool{ + New: func() interface{} { + return new(extJSONValueReader) + }, + }, + } +} + +// Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. +func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) { + vr := bvrp.pool.Get().(*extJSONValueReader) + return vr.reset(r, canonical) +} + +// Put inserts a ValueReader into the pool. If the ValueReader is not a ExtJSON ValueReader nothing +// is inserted into the pool and ok will be false. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. +func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) { + bvr, ok := vr.(*extJSONValueReader) + if !ok { + return false + } + + bvr, _ = bvr.reset(nil, false) + bvrp.pool.Put(bvr) + return true +} + +type ejvrState struct { + mode mode + vType bsontype.Type + depth int +} + +// extJSONValueReader is for reading extended JSON. +type extJSONValueReader struct { + p *extJSONParser + + stack []ejvrState + frame int +} + +// NewExtJSONValueReader creates a new ValueReader from a given io.Reader +// It will interpret the JSON of r as canonical or relaxed according to the +// given canonical flag +func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) { + return newExtJSONValueReader(r, canonical) +} + +func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) { + ejvr := new(extJSONValueReader) + return ejvr.reset(r, canonical) +} + +func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) { + p := newExtJSONParser(r, canonical) + typ, err := p.peekType() + + if err != nil { + return nil, ErrInvalidJSON + } + + var m mode + switch typ { + case bsontype.EmbeddedDocument: + m = mTopLevel + case bsontype.Array: + m = mArray + default: + m = mValue + } + + stack := make([]ejvrState, 1, 5) + stack[0] = ejvrState{ + mode: m, + vType: typ, + } + return &extJSONValueReader{ + p: p, + stack: stack, + }, nil +} + +func (ejvr *extJSONValueReader) advanceFrame() { + if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack + length := len(ejvr.stack) + if length+1 >= cap(ejvr.stack) { + // double it + buf := make([]ejvrState, 2*cap(ejvr.stack)+1) + copy(buf, ejvr.stack) + ejvr.stack = buf + } + ejvr.stack = ejvr.stack[:length+1] + } + ejvr.frame++ + + // Clean the stack + ejvr.stack[ejvr.frame].mode = 0 + ejvr.stack[ejvr.frame].vType = 0 + ejvr.stack[ejvr.frame].depth = 0 +} + +func (ejvr *extJSONValueReader) pushDocument() { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = mDocument + ejvr.stack[ejvr.frame].depth = ejvr.p.depth +} + +func (ejvr *extJSONValueReader) pushCodeWithScope() { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = mCodeWithScope +} + +func (ejvr *extJSONValueReader) pushArray() { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = mArray +} + +func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = m + ejvr.stack[ejvr.frame].vType = t +} + +func 
(ejvr *extJSONValueReader) pop() { + switch ejvr.stack[ejvr.frame].mode { + case mElement, mValue: + ejvr.frame-- + case mDocument, mArray, mCodeWithScope: + ejvr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc... + } +} + +func (ejvr *extJSONValueReader) skipObject() { + // read entire object until depth returns to 0 (last ending } or ] seen) + depth := 1 + for depth > 0 { + ejvr.p.advanceState() + + // If object is empty, raise depth and continue. When emptyObject is true, the + // parser has already read both the opening and closing brackets of an empty + // object ("{}"), so the next valid token will be part of the parent document, + // not part of the nested document. + // + // If there is a comma, there are remaining fields, emptyObject must be set back + // to false, and comma must be skipped with advanceState(). + if ejvr.p.emptyObject { + if ejvr.p.s == jpsSawComma { + ejvr.p.emptyObject = false + ejvr.p.advanceState() + } + depth-- + continue + } + + switch ejvr.p.s { + case jpsSawBeginObject, jpsSawBeginArray: + depth++ + case jpsSawEndObject, jpsSawEndArray: + depth-- + } + } +} + +func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error { + te := TransitionError{ + name: name, + current: ejvr.stack[ejvr.frame].mode, + destination: destination, + modes: modes, + action: "read", + } + if ejvr.frame != 0 { + te.parent = ejvr.stack[ejvr.frame-1].mode + } + return te +} + +func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error { + return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t) +} + +func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error { + switch ejvr.stack[ejvr.frame].mode { + case mElement, mValue: + if ejvr.stack[ejvr.frame].vType != t { + return ejvr.typeError(t) + } + default: + modes := []mode{mElement, mValue} + if addModes != nil { + modes = append(modes, addModes...) 
+ } + return ejvr.invalidTransitionErr(destination, callerName, modes) + } + + return nil +} + +func (ejvr *extJSONValueReader) Type() bsontype.Type { + return ejvr.stack[ejvr.frame].vType +} + +func (ejvr *extJSONValueReader) Skip() error { + switch ejvr.stack[ejvr.frame].mode { + case mElement, mValue: + default: + return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue}) + } + + defer ejvr.pop() + + t := ejvr.stack[ejvr.frame].vType + switch t { + case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope: + // read entire array, doc or CodeWithScope + ejvr.skipObject() + default: + _, err := ejvr.p.readValue(t) + if err != nil { + return err + } + } + + return nil +} + +func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel: // allow reading array from top level + case mArray: + return ejvr, nil + default: + if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil { + return nil, err + } + } + + ejvr.pushArray() + + return ejvr, nil +} + +func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) { + if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil { + return nil, 0, err + } + + v, err := ejvr.p.readValue(bsontype.Binary) + if err != nil { + return nil, 0, err + } + + b, btype, err = v.parseBinary() + + ejvr.pop() + return b, btype, err +} + +func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) { + if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil { + return false, err + } + + v, err := ejvr.p.readValue(bsontype.Boolean) + if err != nil { + return false, err + } + + if v.t != bsontype.Boolean { + return false, fmt.Errorf("expected type bool, but got type %s", v.t) + } + + ejvr.pop() + return v.v.(bool), nil +} + +func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel: + return ejvr, nil + case mElement, mValue: + if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument { + return nil, ejvr.typeError(bsontype.EmbeddedDocument) + } + + ejvr.pushDocument() + return ejvr, nil + default: + return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue}) + } +} + +func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) { + if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil { + return "", nil, err + } + + v, err := ejvr.p.readValue(bsontype.CodeWithScope) + if err != nil { + return "", nil, err + } + + code, err = v.parseJavascript() + + ejvr.pushCodeWithScope() + return code, ejvr, err +} + +func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) { + if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil { + return "", primitive.NilObjectID, err + } + + v, err := ejvr.p.readValue(bsontype.DBPointer) + if err != nil { + return "", primitive.NilObjectID, err + } + + ns, oid, err = v.parseDBPointer() + + ejvr.pop() + return ns, oid, err +} + +func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) { + if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.DateTime) + if err != nil { + return 0, err + } + + d, err := v.parseDateTime() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) 
ReadDecimal128() (primitive.Decimal128, error) { + if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil { + return primitive.Decimal128{}, err + } + + v, err := ejvr.p.readValue(bsontype.Decimal128) + if err != nil { + return primitive.Decimal128{}, err + } + + d, err := v.parseDecimal128() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) ReadDouble() (float64, error) { + if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Double) + if err != nil { + return 0, err + } + + d, err := v.parseDouble() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) ReadInt32() (int32, error) { + if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Int32) + if err != nil { + return 0, err + } + + i, err := v.parseInt32() + + ejvr.pop() + return i, err +} + +func (ejvr *extJSONValueReader) ReadInt64() (int64, error) { + if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Int64) + if err != nil { + return 0, err + } + + i, err := v.parseInt64() + + ejvr.pop() + return i, err +} + +func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) { + if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil { + return "", err + } + + v, err := ejvr.p.readValue(bsontype.JavaScript) + if err != nil { + return "", err + } + + code, err = v.parseJavascript() + + ejvr.pop() + return code, err +} + +func (ejvr *extJSONValueReader) ReadMaxKey() error { + if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.MaxKey) + if err != nil { + return err + } + + err = v.parseMinMaxKey("max") + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadMinKey() error { + if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.MinKey) + if err != nil { + return err + } + + err = v.parseMinMaxKey("min") + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadNull() error { + if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.Null) + if err != nil { + return err + } + + if v.t != bsontype.Null { + return fmt.Errorf("expected type null but got type %s", v.t) + } + + ejvr.pop() + return nil +} + +func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) { + if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil { + return primitive.ObjectID{}, err + } + + v, err := ejvr.p.readValue(bsontype.ObjectID) + if err != nil { + return primitive.ObjectID{}, err + } + + oid, err := v.parseObjectID() + + ejvr.pop() + return oid, err +} + +func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) { + if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil { + return "", "", err + } + + v, err := ejvr.p.readValue(bsontype.Regex) + if err != nil { + return "", "", err + } + + pattern, options, err = v.parseRegex() + + ejvr.pop() + return pattern, options, err +} + +func (ejvr *extJSONValueReader) ReadString() (string, error) { + if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil { + 
return "", err + } + + v, err := ejvr.p.readValue(bsontype.String) + if err != nil { + return "", err + } + + if v.t != bsontype.String { + return "", fmt.Errorf("expected type string but got type %s", v.t) + } + + ejvr.pop() + return v.v.(string), nil +} + +func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) { + if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil { + return "", err + } + + v, err := ejvr.p.readValue(bsontype.Symbol) + if err != nil { + return "", err + } + + symbol, err = v.parseSymbol() + + ejvr.pop() + return symbol, err +} + +func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) { + if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil { + return 0, 0, err + } + + v, err := ejvr.p.readValue(bsontype.Timestamp) + if err != nil { + return 0, 0, err + } + + t, i, err = v.parseTimestamp() + + ejvr.pop() + return t, i, err +} + +func (ejvr *extJSONValueReader) ReadUndefined() error { + if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.Undefined) + if err != nil { + return err + } + + err = v.parseUndefined() + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel, mDocument, mCodeWithScope: + default: + return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope}) + } + + name, t, err := ejvr.p.readKey() + + if err != nil { + if errors.Is(err, ErrEOD) { + if ejvr.stack[ejvr.frame].mode == mCodeWithScope { + _, err := ejvr.p.peekType() + if err != nil { + return "", nil, err + } + } + + ejvr.pop() + } + + return "", nil, err + } + + ejvr.push(mElement, t) + return name, ejvr, nil +} + +func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mArray: + default: + return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray}) + } + + t, err := ejvr.p.peekType() + if err != nil { + if errors.Is(err, ErrEOA) { + ejvr.pop() + } + + return nil, err + } + + ejvr.push(mValue, t) + return ejvr, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go new file mode 100644 index 00000000000..ba39c9601fb --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go @@ -0,0 +1,223 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on github.com/golang/go by The Go Authors +// See THIRD-PARTY-NOTICES for original license terms. + +package bsonrw + +import "unicode/utf8" + +// safeSet holds the value true if the ASCII character with the given array +// position can be represented inside a JSON string without any further +// escaping. +// +// All values are true except for the ASCII control characters (0-31), the +// double quote ("), and the backslash character ("\"). 
+var safeSet = [utf8.RuneSelf]bool{ + ' ': true, + '!': true, + '"': false, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + ',': true, + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + ';': true, + '<': true, + '=': true, + '>': true, + '?': true, + '@': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + '[': true, + '\\': false, + ']': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '{': true, + '|': true, + '}': true, + '~': true, + '\u007f': true, +} + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML